content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
def get_current_user():
"""Gets the current logged in user"""
user = User.get_one_by_field('id', value=get_jwt_identity())
response = {
'name': user['name'],
'username': user['username'],
}
return jsonify(response) | ad5df5b9360b92f9cca9b5591ddb76eb67c814b8 | 5,700 |
import time
def create_kv_store(vm_name, vmdk_path, opts):
""" Create the metadata kv store for a volume """
vol_meta = {kv.STATUS: kv.DETACHED,
kv.VOL_OPTS: opts,
kv.CREATED: time.asctime(time.gmtime()),
kv.CREATED_BY: vm_name}
return kv.create(vmdk_path, vol_meta) | d2f14ca7f0ead2baca68db3fb62b0fcce83425cb | 5,701 |
def mapdict(itemfunc, dictionary):
"""
Much like the builtin function 'map', but works on dictionaries.
*itemfunc* should be a function which takes one parameter, a (key,
value) pair, and returns a new (or same) (key, value) pair to go in
the dictionary.
"""
return dict(map(itemfunc, dictionary.items())) | 1f0573410f82acb1f3c06029cf4bfaccd295e1ac | 5,702 |
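A quick usage sketch for mapdict; the dictionary and lambda below are illustrative, not from the original source:

prices = {"apple": 2, "banana": 3}
doubled = mapdict(lambda kv: (kv[0], kv[1] * 2), prices)  # double every value, keep keys
print(doubled)  # {'apple': 4, 'banana': 6}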
import logging
def get_backup_temp():
"""
This is the function for if the BMP280 malfunctions
"""
try:
temp = BNO055.temperature
logging.warning("Got backup temperature")
return temp
except RuntimeError:
logging.error("BNO055 not connected")
return get_backup_temp_2()
except Exception as error:
logging.error(error)
temp = get_backup_temp_2()
return temp | 5a573f72ea05889bc0fe48c6b896311423f3c6f1 | 5,703 |
from collections import Counter
import numpy as np
import scipy as sp
import scipy.sparse  # ensure sp.sparse is available
from sklearn.metrics import pairwise_distances
def density_matrix(M, row_part, col_part):
"""
Given a sparse matrix M, row labels, and column labels, constructs a block matrix where each entry contains the proportion of 1-entries in the corresponding rows and columns.
"""
m, n = M.shape
if m <= 0 or n <= 0:
raise ValueError("Matrix M has dimensions with 0 or negative value.")
if m != len(row_part):
raise ValueError("Row labels must be the same length as the number of rows in M.")
if n != len(col_part):
raise ValueError("Column labels must be the same length as the number of columns in M.")
row_groups = Counter(row_part).keys()
col_groups = Counter(col_part).keys()
#print row_groups, col_groups
row_part = np.array(row_part)
col_part = np.array(col_part)
row_idx = [np.where(row_part == a)[0] for a in row_groups]
col_idx = [np.where(col_part == b)[0] for b in col_groups]
#print [len(a) for a in row_idx]
#print [len(b) for b in col_idx]
density_matrix = [[np.sum(M[row_idx[i]][:, col_idx[j]]) / float(len(row_idx[i]) * len(col_idx[j])) for j in range(len(col_groups))] for i in range(len(row_groups))]
return density_matrix | 542c6dab2c987902825056f45dd51517446bc6de | 5,704 |
def isAddressInMyAddressBookSubscriptionsListOrWhitelist(address):
"""
Am I subscribed to this address, is it in my addressbook or whitelist?
"""
if isAddressInMyAddressBook(address):
return True
queryreturn = sqlQuery(
'''SELECT address FROM whitelist where address=?'''
''' and enabled = '1' ''',
address)
if queryreturn != []:
return True
queryreturn = sqlQuery(
'''select address from subscriptions where address=?'''
''' and enabled = '1' ''',
address)
if queryreturn != []:
return True
return False | 05c7ed302e5edd070b26d3a04e3d29072c6542c8 | 5,705 |
from datetime import datetime
def GetDateTimeFromTimeStamp(timestamp, tzinfo=None):
"""Returns the datetime object for a UNIX timestamp.
Args:
timestamp: A UNIX timestamp in int or float seconds since the epoch
(1970-01-01T00:00:00.000000Z).
tzinfo: A tzinfo object for the timestamp timezone or None for the local
timezone.
Returns:
The datetime object for a UNIX timestamp.
"""
return datetime.fromtimestamp(timestamp, tzinfo) | c3f224c300c3c1b497d16b92facd8118534e446f | 5,706 |
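A small usage sketch; the timestamp value is arbitrary, and timezone.utc is passed so the output does not depend on the local timezone:

from datetime import timezone

dt_utc = GetDateTimeFromTimeStamp(1609459200, tzinfo=timezone.utc)
print(dt_utc.isoformat())  # 2021-01-01T00:00:00+00:00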
def success(parsed_args):
"""
:param :py:class:`argparse.Namespace` parsed_args:
:return: Nowcast system message type
:rtype: str
"""
logger.info(
f"FVCOM {parsed_args.model_config} {parsed_args.run_type} run boundary condition "
f'file for {parsed_args.run_date.format("YYYY-MM-DD")} '
f"created on {parsed_args.host_name}"
)
msg_type = f"success {parsed_args.model_config} {parsed_args.run_type}"
return msg_type | 448dda23f35d450049673a41c8bc9042e9387e8c | 5,707 |
def _kv_to_dict(kv_string):
"""
Simple splitting of a key-value string into a dictionary in "Name: <Key>, Values: [<value>]" form
:param kv_string: String in the form of "key:value"
:return Dictionary of values
"""
result = {}
if ":" not in kv_string:
log.error('Key-value parameter not in the form of "key:value"')
raise ValueError('Key-value parameter not in the form of "key:value"')
kv = kv_string.split(':')
result['Name'] = f'tag:{kv[0]}'
result['Values'] = [kv[1]]
return result | 5afe7272ec97a69ee8fb18e29e0fda062cfc0152 | 5,708 |
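A usage sketch of the AWS-style tag filter this helper builds (the "env:prod" input is illustrative):

tag_filter = _kv_to_dict("env:prod")
print(tag_filter)  # {'Name': 'tag:env', 'Values': ['prod']}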
from typing import List
import pandas as pd
def get_column_names(df: pd.DataFrame) -> List[str]:
"""Get number of particles from the DataFrame, and return a list of column names
Args:
df: DataFrame
Returns:
List of columns (e.g. PID_xx)
"""
c = df.shape[1]
if c <= 0:
raise IndexError("Please ensure the DataFrame isn't empty!")
return ["PID_{0}".format(x + 1) for x in range(c)] | f935d2db8cca04141305b30bb4470f7a6c96012e | 5,709 |
def get_default(schema, key):
"""Get default value for key in voluptuous schema."""
for k in schema.keys():
if k == key:
if k.default == vol.UNDEFINED:
return None
return k.default() | 7a3963984ddbfaf38c75771115a31cfbbaa737e3 | 5,710 |
import sys
def get_font_loader_class():
"""Get the font loader associated to the current platform
Returns:
FontLoader: specialized version of the font loader class.
"""
if "linux" in sys.platform:
return FontLoaderLinux
if "win" in sys.platform:
return FontLoaderWindows
raise NotImplementedError(
"This operating system ({}) is not currently supported".format(sys.platform)
) | 5b5d9b19ae852b5e3fffac89c609067932836c65 | 5,711 |
def _map_tensor_names(original_tensor_name):
"""
Tensor name mapping
"""
global_tensor_map = {
"model/wte": "word_embedder/w",
"model/wpe": "position_embedder/w",
"model/ln_f/b": "transformer_decoder/beta",
"model/ln_f/g": "transformer_decoder/gamma",
}
if original_tensor_name in global_tensor_map:
return global_tensor_map[original_tensor_name]
original_tensor_name_split = original_tensor_name.split('/')
layer_tensor_map = {
"ln_1/b": "beta",
"ln_1/g": "gamma",
"ln_2/b": "past_poswise_ln/beta",
"ln_2/g": "past_poswise_ln/gamma",
"mlp/c_fc/b": "ffn/conv1/bias",
"mlp/c_fc/w": "ffn/conv1/kernel",
"mlp/c_proj/b": "ffn/conv2/bias",
"mlp/c_proj/w": "ffn/conv2/kernel",
"attn/c_proj/b": "self_attention/multihead_attention/output/bias",
"attn/c_proj/w": "self_attention/multihead_attention/output/kernel",
}
layer_num = int(original_tensor_name_split[1][1:])
layer_feature = '/'.join(original_tensor_name.split('/')[2:])
if layer_feature in layer_tensor_map:
layer_feature_ = layer_tensor_map[layer_feature]
tensor_name_ = '/'.join(
[
'transformer_decoder',
'layer_{}'.format(layer_num),
layer_feature_
])
return tensor_name_
else:
return original_tensor_name | 3331d13e667ee3ef363cdeca5122e8a256202c39 | 5,712 |
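A usage sketch with GPT-2 style checkpoint tensor names following the "model/h<layer>/..." convention the function parses; the input strings are illustrative:

print(_map_tensor_names("model/wte"))             # word_embedder/w
print(_map_tensor_names("model/h0/ln_1/b"))       # transformer_decoder/layer_0/beta
print(_map_tensor_names("model/h11/mlp/c_fc/w"))  # transformer_decoder/layer_11/ffn/conv1/kernel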
from clawpack.visclaw import geoplot
from numpy import linspace
from clawpack.visclaw.data import ClawPlotData
from numpy import mod
from pylab import title
from pylab import ticklabel_format, xticks, gca
from pylab import plot
import os
from clawpack.visclaw import plot_timing_stats
def setplot(plotdata=None):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
if plotdata is None:
plotdata = ClawPlotData()
plotdata.clearfigures() # clear any old figures,axes,items data
plotdata.format = 'binary'
def timeformat(t):
hours = int(t/3600.)
tmin = mod(t,3600.)
min = int(tmin/60.)
sec = int(mod(tmin,60.))
timestr = '%s:%s:%s' % (hours,str(min).zfill(2),str(sec).zfill(2))
return timestr
def title_hours(current_data):
t = current_data.t
timestr = timeformat(t)
title('%s after earthquake' % timestr)
#-----------------------------------------
# Figure for surface
#-----------------------------------------
plotfigure = plotdata.new_plotfigure(name='Computational domain', figno=0)
plotfigure.kwargs = {'figsize':(8,7)}
plotfigure.show = True
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes('pcolor')
plotaxes.title = 'Surface'
plotaxes.scaled = True
def aa(current_data):
gca().set_aspect(1.)
title_hours(current_data)
ticklabel_format(useOffset=False)
xticks(rotation=20)
plotaxes.afteraxes = aa
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
#plotitem.plot_var = geoplot.surface
plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = cmin
plotitem.pcolor_cmax = cmax
plotitem.add_colorbar = True
plotitem.colorbar_shrink = 0.7
plotitem.amr_celledges_show = [0,0,0]
plotitem.amr_patchedges_show = [0,0,0,0]
plotitem.amr_data_show = [1,1,1,1,1,0,0]
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = cmax_land
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0]
plotitem.amr_patchedges_show = [0,0,0,0]
plotitem.amr_data_show = [1,1,1,1,1,0,0]
# add contour lines of bathy if desired:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
plotitem.show = False
plotitem.plot_var = geoplot.topo
plotitem.contour_levels = linspace(-3000,-3000,1)
plotitem.amr_contour_colors = ['y'] # color on each level
plotitem.kwargs = {'linestyles':'solid','linewidths':2}
plotitem.amr_contour_show = [1,0,0]
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
#-----------------------------------------
# Figure for coastal area
#-----------------------------------------
x1,x2,y1,y2 = [-0.005, 0.016, -0.01, 0.01]
plotfigure = plotdata.new_plotfigure(name="coastal area", figno=11)
plotfigure.show = True
plotfigure.kwargs = {'figsize': (6,7)}
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.scaled = False
plotaxes.xlimits = [x1, x2]
plotaxes.ylimits = [y1, y2]
def aa_withbox(current_data):
x1,x2,y1,y2 = (-0.009259, 0.013796, -0.005093, 0.005000)
if current_data.t > 5*60.:
plot([x1,x1,x2,x2,x1], [y1,y2,y2,y1,y1], 'w--')
aa(current_data)
plotaxes.afteraxes = aa_withbox
# Water
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.surface
#plotitem.plot_var = geoplot.surface_or_depth
plotitem.pcolor_cmap = geoplot.tsunami_colormap
plotitem.pcolor_cmin = cmin
plotitem.pcolor_cmax = cmax
plotitem.add_colorbar = True
plotitem.colorbar_shrink = 0.4
plotitem.amr_celledges_show = [0,0,0]
plotitem.patchedges_show = 0
# Land
plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
plotitem.plot_var = geoplot.land
plotitem.pcolor_cmap = geoplot.land_colors
plotitem.pcolor_cmin = 0.0
plotitem.pcolor_cmax = cmax_land
plotitem.add_colorbar = False
plotitem.amr_celledges_show = [0]
plotitem.patchedges_show = 0
# add contour lines of bathy if desired:
plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
#plotitem.show = False
plotitem.plot_var = geoplot.topo
plotitem.contour_levels = [-2,-1,0,1,2]
plotitem.amr_contour_colors = ['yellow'] # color on each level
plotitem.kwargs = {'linestyles':'solid','linewidths':1}
plotitem.amr_contour_show = [0,0,1,0]
plotitem.celledges_show = 0
plotitem.patchedges_show = 0
# Plots of timing (CPU and wall time):
def make_timing_plots(plotdata):
try:
timing_plotdir = plotdata.plotdir + '/_timing_figures'
os.system('mkdir -p %s' % timing_plotdir)
units = {'comptime':'hours', 'simtime':'hours', 'cell':'billions'}
plot_timing_stats.make_plots(outdir=plotdata.outdir, make_pngs=True,
plotdir=timing_plotdir, units=units)
os.system('cp %s/timing.* %s' % (plotdata.outdir, timing_plotdir))
except:
print('*** Error making timing plots')
otherfigure = plotdata.new_otherfigure(name='timing',
fname='_timing_figures/timing.html')
otherfigure.makefig = make_timing_plots
#-----------------------------------------
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_gaugenos = 'all' # list of gauges to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html' # pointer for top of index
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 2 # layout of plots
plotdata.latex_framesperline = 1 # layout of plots
plotdata.latex_makepdf = False # also run pdflatex?
plotdata.parallel = True # make multiple frame png's at once
return plotdata | caad86086eb60a83bf9634a326d18f154487785b | 5,713 |
def test_skeleton(opts):
"""
Template of unittest for skeleton.py
:param opts: mapping parameters as dictionary
:return: file content as string
"""
template = get_template("test_skeleton")
return template.substitute(opts) | 99afb92b3cb2054bf85d62760f14108cabc2b579 | 5,714 |
def get_classpath(obj):
"""
Return the full module and class path of the obj. For instance,
kgof.density.IsotropicNormal
Return a string.
"""
return obj.__class__.__module__ + "." + obj.__class__.__name__ | bf986e2b27dd8a216a2cc2cdb2fb2b8a83b361cc | 5,715 |
import json
def bill_content(bill_version: str) -> str:
"""
Returns the bill text, broken down by the way the XML was structured
Args:
bill_version (str): bill_version_id used as a fk on the BillContent table
Returns:
str: String json array of bills
"""
results = get_bill_contents(bill_version)
results = [x.to_dict() for x in results]
return json.dumps(results) | 3bb5ce368a9d789e216926f41dad8c858fd2858c | 5,716 |
def has_default(column: Column) -> bool:
"""Column has server or Sqlalchemy default value."""
if has_server_default(column) or column.default:
return True
else:
return False | d2b0a3d3bdd201f9623c2d9d5587c5526322db54 | 5,717 |
import statistics
def iterations_median(benchmark_result):
"""A function to calculate the median of the amount of iterations.
Parameters
----------
benchmark_result : list of list of list of namedtuple
The result from a benchmark.
Returns
-------
numpy.ndarray
A 2D array containing the median of the amount of iterations for every
algorithm-problem pair. Note that the indices of a certain
algorithm-problem pair in the benchmark_result will be the same as the
indices one needs to get the results for that pair.
"""
return _func_on_data(benchmark_result, statistics.median, 1) | 9f758ec3777303e0a9bddaa2c4f6bd3b48a47bcc | 5,718 |
def _make_list(input_list, proj_ident):
"""Used by other functions, takes input_list and returns a list with items converted"""
if not input_list: return []
output_list = []
for item in input_list:
if item is None:
output_list.append(None)
elif item == '':
output_list.append('')
elif isinstance(item, list):
output_list.append(_make_list(item, proj_ident))
elif isinstance(item, dict):
output_list.append(_make_dictionary(item, proj_ident))
elif item is True:
output_list.append(True)
elif item is False:
output_list.append(False)
elif isinstance(item, skiboot.Ident):
if item.proj == proj_ident:
output_list.append(item.num)
else:
# ident is another project, put the full ident
output_list.append([item.proj, item.num])
else:
output_list.append(str(item))
return output_list | 188bd9cb5d8afdce2ce58326376bc1c71627142c | 5,719 |
import os
from itertools import product
import numpy as np
import pandas as pd
import rasterio
from rasterio.mask import mask
def monthly_ndvi():
"""Get monthly NDVI from MOD13C2 products."""
mod13c2_dir = os.path.join(DATA_DIR, 'raw', 'MOD13C2')
months = [m for m in range(1, 13)]
cities = [city.id for city in CASE_STUDIES]
ndvi = pd.DataFrame(index=cities, columns=months)
for city, month in product(CASE_STUDIES, months):
aoi = city.aoi['geometry']
raster_path = os.path.join(
mod13c2_dir, 'mod13c2_{}.tif'.format(str(month).zfill(2))
)
# Mask MOD13C2 data based on the AOI
with rasterio.open(raster_path) as src:
data, _ = mask(src, [aoi], crop=True)
pixels = data[0, :, :].ravel()
# MOD13C2 valid range: -2000 - 10000
pixels = pixels[pixels >= -2000]
pixels = pixels[pixels <= 10000]
# MOD13C2 scale factor: 0.0001
pixels = pixels.astype(float)  # np.float was removed in newer NumPy releases
pixels = pixels * 0.0001
ndvi.at[(city.id, month)] = pixels.mean()
return ndvi | 9be57a809576db720ba562640f7040d822e4212a | 5,720 |
def dvds_s(P, s):
""" Derivative of specific volume [m^3 kg K/ kg kJ]
w.r.t specific entropy at constant pressure"""
T = T_s(P, s)
return dvdT(P, T) / dsdT(P, T) | 4eeb3b50c9347ea34bb6cc781001da61cef2d638 | 5,721 |
from typing import Tuple
def serialize(_cls=None, *, ctor_args: Tuple[str, ...] = ()):
"""Class decorator to register a Proxy class for serialization.
Args:
- ctor_args: names of the attributes to pass to the constructor when deserializing
"""
global _registry
def wrapped(cls):
try:
_serialize = cls._serialize
if not isinstance(_serialize, (tuple, list)):
raise EncodeError(f"Expected tuple or list for _serialize, got {type(_serialize)} for {cls}")
except AttributeError:
cls._serialize = ()
_registry[cls.__name__] = (cls, ctor_args)
return cls
if _cls is None:
return wrapped
else:
return wrapped(_cls) | e79e93569c17f09f156f19a0eb92b326ffbb0f83 | 5,722 |
import subprocess
def runcmd(args):
"""
Run a given program/shell command and return its output.
Error Handling
==============
If the spawned process returns a nonzero exit status, it will print the
program's ``STDERR`` to the running Python interpreter's ``STDERR`` and cause
Python to exit with a return status of 1.
"""
proc = subprocess.Popen(
args,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if proc.wait() != 0:  # any nonzero exit status, as the docstring describes
print(proc.stdout.read().decode())
die(proc.stderr.read().decode())
return proc.stdout.read() | bd7ea3e7c86992d5051bc3738769e37c5443f46e | 5,723 |
def get_as_by_asn(asn_):
"""Return an AS by id.
Args:
asn: ASN
"""
try:
as_ = Asn.get_by_asn(asn_)
except AsnNotFoundError as e:
raise exceptions.AsnDoesNotExistException(str(e))
return as_ | 7e102894db3ca65795ec3097c3ca4a80e565666b | 5,724 |
def spoken_form(text: str) -> str:
"""Convert ``text`` into a format compatible with speech lists."""
# TODO: Replace numeric digits with spoken digits
return _RE_NONALPHABETIC_CHAR.sub(" ", text.replace("'", " ")).strip() | a816cf64e198b41c75ee4bfe1327c22ed309f340 | 5,725 |
def get_default_group_type():
"""Get the default group type."""
name = CONF.default_group_type
grp_type = {}
if name is not None:
ctxt = context.get_admin_context()
try:
grp_type = get_group_type_by_name(ctxt, name)
except exception.GroupTypeNotFoundByName:
# Couldn't find group type with the name in default_group_type
# flag, record this issue and move on
LOG.exception('Default group type is not found. '
'Please check default_group_type config.')
return grp_type | c7077d9dbf90d2d23ac68531c703a793f714f90a | 5,726 |
from typing import Any
from typing import Optional
def toHVal(op: Any, suggestedType: Optional[HdlType]=None):
"""Convert python or hdl value/signal object to hdl value/signal object"""
if isinstance(op, Value) or isinstance(op, SignalItem):
return op
elif isinstance(op, InterfaceBase):
return op._sig
else:
if isinstance(op, int):
if suggestedType is not None:
return suggestedType.fromPy(op)
if op >= 1 << 31:
raise TypeError(
"Number %d is too big to fit in 32 bit integer of HDL"
" use Bits type instead" % op)
elif op < -(1 << 31):
raise TypeError(
"Number %d is too small to fit in 32 bit integer"
" of HDL use Bits type instead" % op)
try:
hType = defaultPyConversions[type(op)]
except KeyError:
hType = None
if hType is None:
raise TypeError("Unknown hardware type for %s" % (op.__class__))
return hType.fromPy(op) | 291ee67b2f3865a4e8bda87c9fd5e2efc098362f | 5,727 |
from typing import Dict
from typing import Any
def _make_readiness_probe(port: int) -> Dict[str, Any]:
"""Generate readiness probe.
Args:
port (int): service port.
Returns:
Dict[str, Any]: readiness probe.
"""
return {
"httpGet": {
"path": "/openmano/tenants",
"port": port,
},
"periodSeconds": 10,
"timeoutSeconds": 5,
"successThreshold": 1,
"failureThreshold": 3,
} | d12f9b91a35a428b9a3949bcfe507f2f84e81a95 | 5,728 |
def compute_embeddings_and_distances_from_region_adjacency(g,info, metric='euclidean', norm_type = 2, n_jobs=1):
"""
This method runs local graph clustering for each node in the region adjacency graph.
Returns the embeddings for each node in a matrix X. Each row corresponds to an embedding
of a node in the region adjacency graph. It also returns the pairwise distance matrix Z.
For example, component Z[i,j] is the distance between nodes i and j.
Parameters
----------
g: GraphLocal
info: list of lists
Each element of the list is another list with two elements.
The first element is the indices of the a segment, while the second element
is the vector representation of that segment.
Parameters (optional)
---------------------
metric: str
Default = 'euclidean'
Metric for measuring distances among nodes.
For details check:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html
norm_type: int
Default = 2
Norm for normalization of the embeddings.
n_jobs: int
Default = 1
Number of jobs to be run in parallel
Returns
-------
X: csc matrix
The embeddings matrix. Each row corresponds to an embedding of a node in the region adjacency graph.
Z: 2D np.ndarray
The pairwise distance matrix Z. For example, component Z[i,j]
is the distance between nodes i and j.
"""
sum_ = 0
JA = [0]
IA = []
A = []
for data in info:
vec = data[1]/np.linalg.norm(data[1],norm_type)
how_many = len(data[0])
sum_ += how_many
JA.append(sum_)
IA.extend(list(data[0]))
A.extend(list(vec))
X = sp.sparse.csc_matrix((A, IA, JA), shape=(g._num_vertices, len(info)))
X = X.transpose()
Z = pairwise_distances(X, metric=metric, n_jobs=n_jobs)  # honor the documented parameters instead of hard-coded values
return X, Z | 324b3b282f87c10a0438130a2d30ad25af18a7ec | 5,729 |
import shlex
def _parse_assoc(lexer: shlex.shlex) -> AssociativeArray:
"""Parse an associative Bash array."""
assert lexer.get_token() == "("
result = {}
while True:
token = lexer.get_token()
assert token != lexer.eof
if token == ")":
break
assert token == "["
key = lexer.get_token()
assert lexer.get_token() == "]"
assert lexer.get_token() == "="
value = _parse_string(lexer.get_token())
result[key] = value
return result | f62c23880972e860c5b0f0d954c5526420ef0926 | 5,730 |
def get_addon_by_name(addon_short_name):
"""get Addon object from Short Name."""
for addon in osf_settings.ADDONS_AVAILABLE:
if addon.short_name == addon_short_name:
return addon | f59b2781343ea34abeaeb5f39b32c3cb00c56bb4 | 5,731 |
def _normpdf(x):
"""Probability density function of a univariate standard Gaussian
distribution with zero mean and unit variance.
"""
return 1.0 / np.sqrt(2.0 * np.pi) * np.exp(-(x * x) / 2.0) | 62088f218630155bbd81f43d5ee78345488d3e57 | 5,732 |
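A worked check of the standard normal density, assuming numpy is imported as np as the snippet requires:

import numpy as np

print(_normpdf(0.0))  # ~0.3989 (= 1 / sqrt(2 * pi))
print(_normpdf(1.0))  # ~0.2420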
def scalar_mult(x, y):
"""A function that computes the product between complex matrices and scalars,
complex vectors and scalars or two complex scalars.
"""
y = y.to(x)
re = real(x) * real(y) - imag(x) * imag(y)
im = real(x) * imag(y) + imag(x) * real(y)
return to_complex(re, im) | 3ab2eaa8a969684e52b3b38922027e39741197d7 | 5,733 |
import random
def train_trajectory_encoder(trajectories):
"""
Train a fixed neural-network encoder that maps variable-length
trajectories (of states) into fixed length vectors, trained to reconstruct
said trajectories.
Returns TrajectoryEncoder.
Parameters:
trajectories (List of np.ndarray): A list of trajectories, each of shape
(?, D), where D is dimension of a state.
Returns:
encoder (TrajectoryEncoder).
"""
state_dim = trajectories[0].shape[1]
network = TrajectoryEncoder(state_dim)
optimizer = th.optim.Adam(network.parameters())
num_trajectories = len(trajectories)
num_batches_per_epoch = num_trajectories // BATCH_SIZE
# Copy trajectories as we are about to shuffle them in-place
trajectories = [x for x in trajectories]
for epoch in range(EPOCHS):
random.shuffle(trajectories)
total_loss = 0
for batch_i in range(num_batches_per_epoch):
batch_trajectories = trajectories[batch_i * BATCH_SIZE:(batch_i + 1) * BATCH_SIZE]
loss = network.vae_reconstruct_loss(batch_trajectories)
optimizer.zero_grad()
loss.backward()
optimizer.step()
total_loss += loss.item()
print("Epoch {}, Avrg loss {}".format(epoch, total_loss / num_batches_per_epoch))
return network | 83e3557841269cde9db969d3944ccccae5c4cb45 | 5,734 |
def flatten_list(x):
"""Flatten a nested list.
Parameters
----------
x : list
nested list of lists to flatten
Returns
-------
x : list
flattened input
"""
if isinstance(x, list):
return [a for i in x for a in flatten_list(i)]
else:
return [x] | 409fc5ee2244426befab9d4af75ba277d5237208 | 5,735 |
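A quick usage sketch:

print(flatten_list([1, [2, [3, 4]], [], [5]]))  # [1, 2, 3, 4, 5]
print(flatten_list("abc"))                      # ['abc'] (non-list input is wrapped)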
def check_vpg_statuses(url, session, verify):
"""
Return a list of VPGs which meet the SLA and a list of those which don't
"""
good, bad = [], []
for vpg in get_api(url, session, "vpgs", verify):
name = vpg['VpgName']
status = vpg_statuses(vpg['Status'])
if status == vpg_statuses.meeting_sla:
good.append(name)
else:
bad.append(name)
return good, bad | 0c3623c89a6879e398f5691f8f8aa0933c055c76 | 5,736 |
def get_hits(adj_matrix, EPSILON = 0.001):
"""[summary]
hubs & authorities calculation
Arguments:
adj_matrix {float[][]} -- [input Adjacent matrix lists like [[1, 0], [0, 1]]
Keyword Arguments
EPSILON {float} -- [factor of change comparision] (default: {0.001})
Returns:
[(float[], float[])] -- [return hubs & authorities]
"""
# initialize to all 1's
is_coverage = False
hubs = np.ones(adj_matrix.shape[0])
authorities = np.ones(adj_matrix.shape[0])
while not is_coverage:
# a = A.T h, h = A a,
new_authorities = np.dot(adj_matrix.T, hubs)
new_hubs = np.dot(adj_matrix, authorities)
# normalize
normalize_auth = lambda x: x / sum(new_authorities)
normalize_hubs = lambda x: x / sum(new_hubs)
new_authorities = normalize_auth(new_authorities)
new_hubs = normalize_hubs(new_hubs)
# check for convergence
diff = abs(sum(new_hubs - hubs) + sum(new_authorities - authorities))
if diff < EPSILON:
is_coverage = True
else:
authorities = new_authorities
hubs = new_hubs
return (new_hubs, new_authorities) | ad9037247e95360e96b8ff4c8ed975d5e0a1f905 | 5,737 |
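A small sketch on a two-node graph, assuming numpy is imported as np as the snippet requires (the adjacency matrix is illustrative):

import numpy as np

adj = np.array([[0, 1],
                [1, 0]])  # node 0 links to node 1 and vice versa
hubs, authorities = get_hits(adj)
print(hubs, authorities)  # both converge to [0.5 0.5]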
def quicksort(seq):
"""
seq is a list of unsorted numbers
return a sorted list of numbers
"""
## stop condition: a list of length 0 or 1 is already sorted
if len(seq) <= 1:
return seq
## recursive case: partition the list around a pivot
else:
low, pivot, high = partition(seq)
## recurse on the left and right parts, then return the sorted list
## by concatenating the sorted left, the pivot, and the sorted right
return quicksort(low) + [pivot] + quicksort(high) | 943b13185ebfe6e44d0f927f9bf6a3a71130619a | 5,738 |
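The snippet relies on a partition helper that is not shown; below is a minimal sketch of one compatible implementation (first element as pivot, a hypothetical stand-in for the original helper) plus a usage example:

def partition(seq):
    # hypothetical helper: split seq into items at or below / above the first-element pivot
    pivot, rest = seq[0], seq[1:]
    low = [x for x in rest if x <= pivot]
    high = [x for x in rest if x > pivot]
    return low, pivot, high

print(quicksort([3, 1, 4, 1, 5, 9, 2, 6]))  # [1, 1, 2, 3, 4, 5, 6, 9]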
from time import time
import sys
import traceback
def scan_resource(
location_rid,
scanners,
timeout=DEFAULT_TIMEOUT,
with_timing=False,
with_threading=True,
):
"""
Return a tuple of:
(location, rid, scan_errors, scan_time, scan_results, timings)
by running the `scanners` Scanner objects for the file or directory resource
with id `rid` at `location` provided as a `location_rid` tuple of (location,
rid) for up to `timeout` seconds. If `with_threading` is False, threading is
disabled.
The returned tuple has these values:
- `location` and `rid` are the original arguments.
- `scan_errors` is a list of error strings.
- `scan_results` is a mapping of scan results from all scanners.
- `scan_time` is the duration in seconds to run all scans for this resource.
- `timings` is a mapping of scan {scanner.name: execution time in seconds}
tracking the execution duration of each scan individually.
`timings` is empty unless `with_timing` is True.
All these values MUST be serializable/picklable because of the way multi-
processing/threading works.
"""
scan_time = time()
location, rid = location_rid
results = {}
scan_errors = []
timings = {} if with_timing else None
if not with_threading:
interruptor = fake_interruptible
else:
interruptor = interruptible
# The timeout is a soft deadline for a scanner to stop processing
# and start returning values. The kill timeout is otherwise there
# as a gatekeeper for runaway processes.
# run each scanner in sequence in its own interruptible
for scanner in scanners:
if with_timing:
start = time()
try:
# pass a deadline that the scanner can opt to honor or not
if timeout:
deadline = time() + int(timeout / 2.5)
else:
deadline = sys.maxsize
runner = partial(scanner.function, location, deadline=deadline)
error, values_mapping = interruptor(runner, timeout=timeout)
if error:
msg = 'ERROR: for scanner: ' + scanner.name + ':\n' + error
scan_errors.append(msg)
# the return value of a scanner fun MUST be a mapping
if values_mapping:
results.update(values_mapping)
except Exception:
msg = 'ERROR: for scanner: ' + scanner.name + ':\n' + traceback.format_exc()
scan_errors.append(msg)
finally:
if with_timing:
timings[scanner.name] = time() - start
scan_time = time() - scan_time
return location, rid, scan_errors, scan_time, results, timings | 6309b2d91d39137949fce6f9fe34c9b0a5e90402 | 5,739 |
import numpy as np
def label_generator(df_well, df_tops, column_depth, label_name):
"""
Generate Formation (or other) Labels to Well Dataframe
(useful for machine learning and EDA purpose)
Input:
df_well is your well dataframe (that originally doesn't have the intended label)
df_tops is your label dataframe (this dataframe should ONLY have 2 columns)
1st column is the label name (e.g. formation top names)
2nd column is the depth of each label name
column_depth is the name of depth column on your df_well dataframe
label_name is the name of label that you want to produce (e.g. FM. LABEL)
Output:
df_well is your dataframe that now has the labels (e.g. FM. LABEL)
"""
# generate list of formation depths and top names
fm_tops = df_tops.iloc[:,0]
fm_depths = df_tops.iloc[:,1]
# create FM. LABEL column to well dataframe
# initiate with NaNs
df_well[label_name] = np.full(len(df_well), np.nan)
indexes = []
topnames = []
for j in range(len(fm_depths)):
# search index at which the DEPTH in the well df equals to OR
# larger than the DEPTH of each pick in the pick df
if (df_well[column_depth].iloc[-1] > fm_depths[j]):
index = df_well.index[(df_well[column_depth] >= fm_depths[j])][0]
top = fm_tops[j]
indexes.append(index)
topnames.append(top)
# replace the NaN in the LABEL column of well df
# at the assigned TOP NAME indexes
df_well.loc[indexes, label_name] = topnames  # assign via .loc to avoid chained-assignment pitfalls
# Finally, using pandas "ffill" to fill all the rows
# with the TOP NAMES
df_well = df_well.fillna(method='ffill')
return df_well | 16336d8faf675940f3eafa4e7ec853751fd0f5d0 | 5,740 |
def tclexec(tcl_code):
"""Run tcl code"""
g[TCL][REQUEST] = tcl_code
g[TCL][RESULT] = tkeval(tcl_code)
return g[TCL][RESULT] | c90504b567390aa662927e8549e065d1c98fcc40 | 5,741 |
import os
def cleanFiles(direct, CWD=os.getcwd()):
"""
removes the year and trailing white space, if there is a year
direct holds the file name for the file of the contents of the directory
@return list of the cleaned data
"""
SUBDIR = CWD + "output/" # change directory to ouput folder
contents = os.listdir(SUBDIR)
LOGDIR = CWD + "log/" # change directory for logging
log = open(f"{LOGDIR}log.txt", "w") # opens log file
for i in range(0, len(contents)):
contents[i] = contents[i].strip("\n") # remove trailing \n
if (
"(" in contents[i] or ")" in contents[i]
): # if '(' or ')' exists in the file name, signifying that there is a year
old = contents[i] # holds the name of the movie for logging purposes
contents[i] = contents[i][
:-7
] # truncates the string to remove year and trailing whitespace
log.write(
f"Removing date from {old} -> {contents[i]})\n"
) # writes to the log file
log.close()
return contents | 2a16037ef15d547af8c1b947d96747b2b2d62fd1 | 5,742 |
def test_labels(test_project_data):
"""A list of labels that correspond to SEED_LABELS."""
labels = []
for label in SEED_LABELS:
labels.append(Label.objects.create(name=label, project=test_project_data))
return labels | 9f7477fa313430ad0ca791037823f478327be305 | 5,743 |
def unravel_index_2d(indices, dims):
"""Unravel index, for 2D inputs only.
See Numpy's unravel.
Args:
indices: <int32> [num_elements], coordinates into 2D row-major tensor.
dims: (N, M), dimensions of the 2D tensor.
Returns:
coordinates: <int32> [2, num_elements], row (1st) and column (2nd) indices.
"""
row_inds = tf.floordiv(indices, dims[1])
col_inds = tf.floormod(indices, dims[1])
return tf.stack([row_inds, col_inds], axis=0) | e7de01de80ba39a81600d8054a28def4dd94f564 | 5,744 |
def np_xcycwh_to_xy_min_xy_max(bbox: np.array) -> np.array:
"""
Convert bbox from shape [xc, yc, w, h] to [xmin, ymin, xmax, ymax]
Args:
bbox: np.array of bboxes with shape (n, 4), where n is the number of bboxes to convert
Returns:
The converted bbox
"""
# convert the bbox from [xc, yc, w, h] to [xmin, ymin, xmax, ymax].
bbox_xy = np.concatenate([bbox[:, :2] - (bbox[:, 2:] / 2), bbox[:, :2] + (bbox[:, 2:] / 2)], axis=-1)
return bbox_xy | 382230768efc625babc8d221a1984950fd3a08eb | 5,745 |
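A small numpy check of the box conversion (values are illustrative):

import numpy as np

boxes = np.array([[2.0, 2.0, 2.0, 2.0]])  # xc, yc, w, h
print(np_xcycwh_to_xy_min_xy_max(boxes))  # [[1. 1. 3. 3.]]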
def wmedian(spec, wt, cfwidth=100):
""" Performs a weighted median filtering of a 1d spectrum
Operates using a cumulative sum curve
Parameters
----------
spec : numpy.ndarray
Input 1d spectrum to be filtered
wt : numpy.ndarray
A spectrum of equal length as the input array to provide the weights.
cfwidth : int or float
Window size for the continuum filter, for the SVD computation.
Default to 100.
"""
# ignore the warning (feature not a bug)
old_settings = np.seterr(divide='ignore')
spec = np.pad(spec, (cfwidth, cfwidth), 'constant', constant_values=0)
wt = np.abs(wt)
wt = np.pad(wt, (cfwidth, cfwidth), 'constant',
constant_values=(np.min(wt) / 1000., np.min(wt) / 1000.))
# do some striding for speed
swin = rolling_window(spec, cfwidth) # create window container array
wwin = rolling_window(wt, cfwidth) # create window container array
# sort based on data
srt = np.argsort(swin, axis=-1)
ind = np.ogrid[0:swin.shape[0], 0:swin.shape[1]]
sdata = swin[ind[0], srt]
swt = wwin[ind[0], srt]
# calculate accumulated weights
awt = np.cumsum(swt, axis=-1)
# new weightsort for normalization and consideration of data
nw = (awt - 0.5 * swt) / awt[:, -1][:, np.newaxis]
# find the midpoint in the new weight sort
s = np.argmin(np.abs(nw - 0.5), axis=-1)
sl = np.arange(len(s))
nws = nw[sl, s]
nws1 = nw[sl, s - 1]
f1 = (nws - 0.5) / (nws - nws1)
f2 = (0.5 - nws1) / (nws - nws1)
wmed = sdata[sl, s - 1] * f1 + sdata[sl, s] * f2
width = cfwidth // 2
wmed = wmed[width:-width - 1]
np.seterr(**old_settings)  # restore the previous floating-point error settings
return wmed | 2642238419c6c503a8369a8c16464c74180db9c9 | 5,746 |
import subprocess
import pexpect
def execute_command(cmd, logfile):
"""
Function to execute a non-interactive command and
return the output of the command if there is some
"""
try:
rows, columns = subprocess.check_output(["stty", "size"]).decode().split()
child = pexpect.spawn(
"/bin/bash",
["-c", cmd.strip()],
logfile=logfile,
encoding="utf-8",
timeout=300,
)
child.setwinsize(int(rows), int(columns))
child.expect(pexpect.EOF)
child.close()
return child.before
except pexpect.exceptions.ExceptionPexpect as e:
print(e)
print("Error in command: " + cmd)
return None | 8553b5b8010353fe28b00d1fbca79b6ee229ec6b | 5,747 |
def _read(filename, format=None, **kwargs):
"""
Reads a single event file into a ObsPy Catalog object.
"""
catalog, format = _read_from_plugin('event', filename, format=format,
**kwargs)
for event in catalog:
event._format = format
return catalog | 1489d72bacb445d8101d7e4b599c672359680ce5 | 5,748 |
def compute_encrypted_key_powers(s, k):
"""
Compute the powers of the custody key s, encrypted using Paillier. The validator
(outsourcer) gives these to the provider so they can compute the proof of custody
for them.
"""
spower = 1
enc_spowers = []
for i in range(k + 2):
enc_spowers.append(encrypt(spower))
spower = spower * s % r
return enc_spowers | d7654018501096eebcc7b4dfa50f342a5522c528 | 5,749 |
def idiosyncratic_var_vector(returns, idiosyncratic_var_matrix):
"""
Get the idiosyncratic variance vector
Parameters
----------
returns : DataFrame
Returns for each ticker and date
idiosyncratic_var_matrix : DataFrame
Idiosyncratic variance matrix
Returns
-------
idiosyncratic_var_vector : DataFrame
Idiosyncratic variance Vector
"""
#TODO: Implement function
return pd.DataFrame(data=np.diag(idiosyncratic_var_matrix),index=returns.columns) | fd7d1344f4e5941c4f9d239a0eaf2a6cac088973 | 5,750 |
def get_ticker_quote_type(ticker: str) -> str:
"""Returns the quote type of ticker symbol
Parameters
----------
ticker : str
ticker symbol of organization
Returns
-------
str
quote type of ticker
"""
yf_ticker = yf.Ticker(ticker)
info = yf_ticker.info
return info["quoteType"] if "quoteType" in info else "" | 7f105789d88591f240e753df104bb7428eef5f74 | 5,751 |
def sensitivity_score(y_true, y_pred):
"""
Compute classification sensitivity score
Classification sensitivity (also named true positive rate or recall) measures
the proportion of actual positives (class 1) that are correctly identified as
positives. It is defined as follows:
sensitivity = TP / (TP + FN)
Parameters
----------
y_true : numpy array
1D labels array of ground truth labels
y_pred : numpy array
1D labels array of predicted labels
Returns
-------
Score value (float)
"""
# Compute the sensitivity score
return recall_score(y_true, y_pred) | bb09213eca5a6696e92ebafa204975bc3e6e2f7b | 5,752 |
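A worked example, assuming the snippet's recall_score comes from scikit-learn: with TP = 2 and FN = 1 the score is 2/3.

from sklearn.metrics import recall_score  # dependency assumed by the snippet

y_true = [1, 1, 1, 0, 0]
y_pred = [1, 1, 0, 0, 1]
print(sensitivity_score(y_true, y_pred))  # 0.666... (TP=2, FN=1)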
def wrap_compute_softmax(topi_compute):
"""Wrap softmax topi compute"""
def _compute_softmax(attrs, inputs, out_type):
axis = attrs.get_int("axis")
return [topi_compute(inputs[0], axis)]
return _compute_softmax | 3a5e3843f77d8bdfefc0f77b878f135aac4896f6 | 5,753 |
from typing import Any
import requests
def post(url: str, **kwargs: Any) -> dict:
"""Helper function for performing a POST request."""
return __make_request(requests.post, url, **kwargs) | 37a4b7c7128349248f0ecd64ce53d118265ed40e | 5,754 |
import pandas as pd
import os
def vote92(path):
"""Reports of voting in the 1992 U.S. Presidential election.
Survey data containing self-reports of vote choice in the 1992 U.S.
Presidential election, with numerous covariates, from the 1992 American
National Election Studies.
A data frame with 909 observations on the following 10 variables.
`vote`
a factor with levels `Perot` `Clinton` `Bush`
`dem`
a numeric vector, 1 if the respondent reports identifying with the
Democratic party, 0 otherwise.
`rep`
a numeric vector, 1 if the respondent reports identifying with the
Republican party, 0 otherwise
`female`
a numeric vector, 1 if the respondent is female, 0 otherwise
`persfinance`
a numeric vector, -1 if the respondent reports that their personal
financial situation has gotten worse over the last 12 months, 0 for
no change, 1 if better
`natlecon`
a numeric vector, -1 if the respondent reports that national
economic conditions have gotten worse over the last 12 months, 0 for
no change, 1 if better
`clintondis`
a numeric vector, squared difference between respondent's
self-placement on a scale measure of political ideology and the
respondent's placement of the Democratic candidate, Bill Clinton
`bushdis`
a numeric vector, squared ideological distance of the respondent
from the Republican candidate, President George H.W. Bush
`perotdis`
a numeric vector, squared ideological distance of the respondent
from the Reform Party candidate, Ross Perot
Alvarez, R. Michael and Jonathan Nagler. 1995. Economics, issues and the
Perot candidacy: Voter choice in the 1992 Presidential election.
*American Journal of Political Science*. 39:714-44.
Miller, Warren E., Donald R. Kinder, Steven J. Rosenstone and the
National Election Studies. 1999. *National Election Studies, 1992:
Pre-/Post-Election Study*. Center for Political Studies, University of
Michigan: Ann Arbor, Michigan.
Inter-University Consortium for Political and Social Research. Study
Number 1112. http://dx.doi.org/10.3886/ICPSR01112.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `vote92.csv`.
Returns:
Tuple of np.ndarray `x_train` with 909 rows and 9 columns and
dictionary `metadata` of column headers (feature names).
"""
path = os.path.expanduser(path)
filename = 'vote92.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/pscl/vote92.csv'
maybe_download_and_extract(path, url,
save_file_name='vote92.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | 1d08d8087b7ced665399011986b27c92d3f96140 | 5,755 |
def csr_full_col_slices(arr_data,arr_indices,arr_indptr,indptr,row):
"""
This algorithm is used for when all column dimensions are full slices with a step of one.
It might be worth it to make two passes over the array and use static arrays instead of lists.
"""
indices = []
data = []
for i,r in enumerate(row,1):
indices.extend(arr_indices[arr_indptr[r]:arr_indptr[r+1]])
data.extend(arr_data[arr_indptr[r]:arr_indptr[r+1]])
indptr[i] = indptr[i-1] + len(arr_indices[arr_indptr[r]:arr_indptr[r+1]])
data = np.array(data)
indices = np.array(indices)
return (data,indices,indptr) | bf5684a4b54988a86066a78d7887ec2e0473f3a9 | 5,756 |
def slide_period(scraping_period, vacancies):
"""Move upper period boundary to the value equal to the timestamp of the
last found vacancy."""
if not vacancies: # for cases when key 'total' = 0
return None
period_start, period_end = scraping_period
log(f'Change upper date {strtime_from_unixtime(period_end)}')
period_end = define_oldest_vacancy_unixtime(vacancies)
return period_start, period_end | fc7654835270cf78e7fb3007d0158c565717cf47 | 5,757 |
def merge_to_many(gt_data, oba_data, tolerance):
"""
Merge gt_data dataframe and oba_data dataframe using the nearest value between columns 'gt_data.GT_DateTimeOrigUTC' and
'oba_data.Activity Start Date and Time* (UTC)'. Before merging, the data is grouped by 'GT_Collector' on gt_data and
each row on gt_data will be paired with one or none of the rows on oba_data grouped by userId.
:param tolerance: maximum allowed difference (seconds) between 'gt_data.GT_DateTimeOrigUTC' and
'oba_data.Activity Start Date and Time* (UTC)'.
:param gt_data: dataframe with preprocessed data from ground truth XLSX data file
:param oba_data: dataframe with preprocessed data from OBA firebase export CSV data file
:return: dataframe with the merged data.
"""
# List of unique collectors and and unique users
list_collectors = gt_data['GT_Collector'].unique()
list_oba_users = oba_data['User ID'].unique()
# Create empty dataframes to be returned
merged_df = pd.DataFrame()
matches_df = pd.DataFrame()
all_unmatched_trips_df = pd.DataFrame()
list_total_trips = []
for collector in list_collectors:
print("Merging data for collector ", collector)
# Create dataframe for a collector on list_collectors
gt_data_collector = gt_data[gt_data["GT_Collector"] == collector]
# Make sure dataframe is sorted by 'ClosesTime'
gt_data_collector.sort_values('GT_DateTimeOrigUTC', inplace=True)
# Add total trips per collector
list_total_trips.append(len(gt_data_collector))
i = 0
for oba_user in list_oba_users:
# Create a dataframe with the oba_user activities only
oba_data_user = oba_data[oba_data["User ID"] == oba_user]
# Make sure dataframes is sorted by 'Activity Start Date and Time* (UTC)'
oba_data_user.sort_values('Activity Start Date and Time* (UTC)', inplace=True)
# Create df for OBA trips without GT Data match
oba_unmatched_trips_df = oba_data_user.copy()
# Iterate over each trip of one collector to match it with zero to many activities of an oba_data_user
for index, row in gt_data_collector.iterrows():
bunch_of_matches = oba_data_user[(oba_data_user['Activity Start Date and Time* (UTC)'] >=
row['GT_DateTimeOrigUTC']) &
(oba_data_user['Activity Start Date and Time* (UTC)'] <=
row['GT_DateTimeDestUTC'])
]
# Get the size of bunch_of_matches to create a repeated dataframe to concatenate with
if bunch_of_matches.empty:
len_bunch = 1
else:
len_bunch = bunch_of_matches.shape[0]
# Remove matched rows from unmatched trips df
oba_unmatched_trips_df = pd.merge(oba_unmatched_trips_df, bunch_of_matches, indicator=True, how='outer').\
query('_merge=="left_only"').drop('_merge', axis=1)
subset_df = gt_data_collector.loc[[index], :]
# Repeat the first row `len_bunch` times.
new_df = pd.DataFrame(np.repeat(subset_df.values, len_bunch, axis=0))
new_df.columns = gt_data_collector.columns
# Add backup Start Time Columns
new_df['GT_DateTimeOrigUTC_Backup'] = new_df['GT_DateTimeOrigUTC']
# Remove (fill with NaN) repeated GT rows unless required not to
if len_bunch > 1 and not command_line_args.repeatGtRows:
new_df.loc[1:, new_df.columns.difference(['GT_DateTimeOrigUTC', 'GT_LatOrig', 'GT_LonOrig',
'GT_TourID', 'GT_TripID'])] = np.NaN
temp_merge = pd.concat([new_df.reset_index(drop=True), bunch_of_matches.reset_index(drop=True)],
axis=1)
# Make sure the bunch of matches has the 'User Id' even for the empty rows
temp_merge["User ID"] = oba_user
# Merge running matches with current set of found matches
merged_df = pd.concat([merged_df, temp_merge], ignore_index=True)
# Add oba_user and number of many matches to the matches_df
subset_df["User ID"] = oba_user[-4:]
subset_df["GT_NumberOfTransitions"] = 0 if bunch_of_matches.empty else len_bunch
matches_df = pd.concat([matches_df, subset_df], ignore_index=True)
# Reorder the OBA columns
oba_unmatched_trips_df = oba_unmatched_trips_df[constants.OBA_UNMATCHED_NEW_COLUMNS_ORDER]
# Add Collector and device to unmatched trips
oba_unmatched_trips_df['User ID'] = oba_user[-4:]
# oba_unmatched_trips_df['GT_Collector'] = collector
oba_unmatched_trips_df.insert(loc=0, column='GT_Collector', value=collector)
# Append the unmatched trips per collector/device to the all unmatched df
all_unmatched_trips_df = pd.concat([all_unmatched_trips_df, oba_unmatched_trips_df], ignore_index=True)
return merged_df, matches_df, all_unmatched_trips_df | 0c9bc4269127f063fca3e0c52b677e4c44636b7d | 5,758 |
def returnDevPage():
"""
Return page for the development input.
:return: rendered dev.html web page
"""
return render_template("dev.html") | d50134ebc84c40177bf6316a8997f38d9c9589fb | 5,759 |
def toeplitz(c, r=None):
"""Construct a Toeplitz matrix.
The Toeplitz matrix has constant diagonals, with ``c`` as its first column
and ``r`` as its first row. If ``r`` is not given, ``r == conjugate(c)`` is
assumed.
Args:
c (cupy.ndarray): First column of the matrix. Whatever the actual shape
of ``c``, it will be converted to a 1-D array.
r (cupy.ndarray, optional): First row of the matrix. If None,
``r = conjugate(c)`` is assumed; in this case, if ``c[0]`` is real,
the result is a Hermitian matrix. r[0] is ignored; the first row of
the returned matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
Returns:
cupy.ndarray: The Toeplitz matrix. Dtype is the same as
``(c[0] + r[0]).dtype``.
.. seealso:: :func:`cupyx.scipy.linalg.circulant`
.. seealso:: :func:`cupyx.scipy.linalg.hankel`
.. seealso:: :func:`cupyx.scipy.linalg.solve_toeplitz`
.. seealso:: :func:`cupyx.scipy.linalg.fiedler`
.. seealso:: :func:`scipy.linalg.toeplitz`
"""
c = c.ravel()
r = c.conjugate() if r is None else r.ravel()
return _create_toeplitz_matrix(c[::-1], r[1:]) | d8d9246a766b9bd081da5e082a9eb345cd40491b | 5,760 |
import os
def download_private_id_set_from_gcp(public_storage_bucket, storage_base_path):
"""Downloads private ID set file from cloud storage.
Args:
public_storage_bucket (google.cloud.storage.bucket.Bucket): google storage bucket where private_id_set.json
is stored.
storage_base_path (str): The storage base path of the bucket.
Returns:
str: private ID set file full path.
"""
storage_id_set_path = os.path.join(storage_base_path, 'content/private_id_set.json') if storage_base_path else \
'content/private_id_set.json'
private_artifacts_path = '/home/runner/work/content-private/content-private/content/artifacts'
private_id_set_path = private_artifacts_path + '/private_id_set.json'
if not os.path.exists(private_artifacts_path):
os.mkdir(private_artifacts_path)
is_private_id_set_file_exist = id_set_file_exists_in_bucket(public_storage_bucket, storage_id_set_path)
if is_private_id_set_file_exist:
index_blob = public_storage_bucket.blob(storage_id_set_path)
index_blob.download_to_filename(private_id_set_path)
else:
create_empty_id_set_in_artifacts(private_id_set_path)
return private_id_set_path if os.path.exists(private_id_set_path) else '' | 05c29687c1c2350510e95d6d21de3ac3130c25f9 | 5,761 |
def matmul_op_select(x1_shape, x2_shape, transpose_x1, transpose_x2):
"""select matmul op"""
x1_dim, x2_dim = len(x1_shape), len(x2_shape)
if x1_dim == 1 and x2_dim == 1:
matmul_op = P.Mul()
elif x1_dim <= 2 and x2_dim <= 2:
transpose_x1 = False if x1_dim == 1 else transpose_x1
transpose_x2 = False if x2_dim == 1 else transpose_x2
matmul_op = P.MatMul(transpose_x1, transpose_x2)
elif x1_dim == 1 and x2_dim > 2:
matmul_op = P.BatchMatMul(False, transpose_x2)
elif x1_dim > 2 and x2_dim == 1:
matmul_op = P.BatchMatMul(transpose_x1, False)
else:
matmul_op = P.BatchMatMul(transpose_x1, transpose_x2)
return matmul_op | ee485178b9eab8f9a348dff7085b87740fac8955 | 5,762 |
def Align4(i):
"""Round up to the nearest multiple of 4. See unit tests."""
return ((i-1) | 3) + 1 | 16ff27823c30fcc7d03fb50fe0d7dbfab9557194 | 5,763 |
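A few quick checks of the rounding behavior:

print(Align4(1))   # 4
print(Align4(4))   # 4
print(Align4(5))   # 8
print(Align4(13))  # 16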
def poggendorff_parameters(illusion_strength=0, difference=0):
"""Compute Parameters for Poggendorff Illusion.
Parameters
----------
illusion_strength : float
The strength of the line tilt in biasing the perception of an uncontinuous single line.
Specifically, the orientation of the lines in degrees, 0 being vertical and
larger values (in magnitude; no change with positive or negative sign) rotating clockwise.
difference : float
The objective magnitude of the lines discontinuity.
Specifically, the amount of displacement of the right line relative to the left line. A positive sign
represents the right line displaced higher up, and a negative sign represents it displaced lower down.
Returns
-------
dict
Dictionary of parameters of the Poggendorff illusion.
"""
y_offset = difference
# Coordinates of left line
angle = 90 - illusion_strength
angle = angle if illusion_strength >= 0 else -angle
coord, _, _ = _coord_line(x1=0, y1=0, angle=-angle, length=0.75)
left_x1, left_y1, left_x2, left_y2 = coord
# Right line
coord, _, _ = _coord_line(x1=0, y1=y_offset, angle=180 - angle, length=0.75)
right_x1, right_y1, right_x2, right_y2 = coord
parameters = {
"Illusion": "Poggendorff",
"Illusion_Strength": illusion_strength,
"Difference": difference,
"Illusion_Type": "Congruent" if illusion_strength > 0 else "Incongruent",
"Left_x1": left_x1,
"Left_y1": left_y1,
"Left_x2": left_x2,
"Left_y2": left_y2,
"Right_x1": right_x1,
"Right_y1": right_y1,
"Right_x2": right_x2,
"Right_y2": right_y2,
"Angle": angle,
"Rectangle_Height": 1.75,
"Rectangle_Width": 0.5,
"Rectangle_y": 0,
}
return parameters | 22647299bd7ed3c126f6ac22866dab94809723db | 5,764 |
from typing import List
from typing import Tuple
def download_blobs(blobs: List[storage.Blob]) -> List[Tuple[str, str]]:
"""Download blobs from bucket."""
files_list = []
for blob in blobs:
tmp_file_name = "-".join(blob.name.split("/")[1:])
file_name = blob.name.split("/")[-1]
tmp_file_path = f"/tmp/{tmp_file_name}"
blob.download_to_filename(tmp_file_path)
files_list.append((file_name, tmp_file_path))
return files_list | e2ea0f373f6097a34e1937944603456d52771220 | 5,765 |
def testMarkov2(X, ns, alpha, verbose=True):
"""Test second-order Markovianity of symbolic sequence X with ns symbols.
Null hypothesis:
second-order MC <=>
p(X[t+1] | X[t], X[t-1]) = p(X[t+1] | X[t], X[t-1], X[t-2])
cf. Kullback, Technometrics (1962), Table 10.2.
Args:
X: symbolic sequence, symbols = [0, 1, 2, ...]
ns: number of symbols
alpha: significance level
Returns:
p: p-value of the Chi2 test for independence
"""
if verbose:
print("\nSECOND-ORDER MARKOVIANITY:")
n = len(X)
f_ijkl = np.zeros((ns,ns,ns,ns))
f_ijk = np.zeros((ns,ns,ns))
f_jkl = np.zeros((ns,ns,ns))
f_jk = np.zeros((ns,ns))
for t in range(n-3):
i = X[t]
j = X[t+1]
k = X[t+2]
l = X[t+3]
f_ijkl[i,j,k,l] += 1.0
f_ijk[i,j,k] += 1.0
f_jkl[j,k,l] += 1.0
f_jk[j,k] += 1.0
T = 0.0
for i, j, k, l in np.ndindex(f_ijkl.shape):
f = f_ijkl[i,j,k,l]*f_ijk[i,j,k]*f_jkl[j,k,l]*f_jk[j,k]
if (f > 0):
num_ = f_ijkl[i,j,k,l]*f_jk[j,k]
den_ = f_ijk[i,j,k]*f_jkl[j,k,l]
T += (f_ijkl[i,j,k,l]*np.log(num_/den_))
T *= 2.0
df = ns*ns*(ns-1)*(ns-1)
#p = chi2test(T, df, alpha)
p = chi2.sf(T, df, loc=0, scale=1)
if verbose:
print(f"p: {p:.2e} | t: {T:.3f} | df: {df:.1f}")
return p | 15200c720eecb36c9d9e6f2abeaa6ee2f075fd3f | 5,766 |
import aiohttp
import json
async def send_e_wechat_request(method, request_url, data):
"""
Send a request to the Enterprise WeChat (WeChat Work) API
:param method: string, request method
:param request_url: string, request URL
:param data: json data
:return: result, err
"""
result = {}  # default result so an unsupported method does not raise NameError at the final return
if method == 'GET':
try:
async with aiohttp.ClientSession() as session:
async with session.get(request_url, data=json.dumps(data)) as response:
try:
result = await response.json(encoding='utf-8')
except Exception as e:
return {}, e
except Exception as e:
return {}, e
if method == 'POST':
try:
async with aiohttp.ClientSession() as session:
async with session.post(request_url, data=json.dumps(data)) as response:
try:
result = await response.json(encoding='utf-8')
except Exception as e:
return {}, e
except Exception as e:
return {}, e
return result, None | e7bd7c4bcfd7a890733f9172aced3cd25b0185d4 | 5,767 |
def no_missing_terms(formula_name, term_set):
"""
Returns true if the set is not missing terms corresponding to the
entries in Appendix D, False otherwise. The set of terms should be exactly
equal, and not contain more or less terms than expected.
"""
reqd_terms = dimless_vertical_coordinates[formula_name]
def has_all_terms(reqd_termset):
return len(reqd_termset ^ term_set) == 0
if isinstance(reqd_terms, set):
return has_all_terms(reqd_terms)
# if it's not a set, it's likely some other form of iterable with multiple
# possible definitions i.e. a/ap are interchangeable in
else:
return any(has_all_terms(req) for req in reqd_terms) | 4edafafc728b58a297f994f525b8ea2dc3d4b9aa | 5,768 |
import torch
def initialize(X, num_clusters):
"""
initialize cluster centers
:param X: (torch.tensor) matrix
:param num_clusters: (int) number of clusters
:return: (np.array) initial state
"""
num_samples = X.shape[1]
bs = X.shape[0]
indices = torch.empty(X.shape[:-1], device=X.device, dtype=torch.long)
for i in range(bs):
indices[i] = torch.randperm(num_samples, device=X.device)
initial_state = torch.gather(X, 1, indices.unsqueeze(-1).repeat(1, 1, X.shape[-1])).reshape(bs, num_clusters, -1, X.shape[-1]).mean(dim=-2)
return initial_state | a704daf3997202f4358bb9f3fbd51524fee4afe5 | 5,769 |
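A shape-level usage sketch with PyTorch; the sizes are illustrative, and note the reshape requires num_samples to be divisible by num_clusters:

import torch

X = torch.randn(2, 12, 3)                # (batch, num_samples, feature_dim)
centers = initialize(X, num_clusters=4)  # shuffle per batch, then average in groups
print(centers.shape)                     # torch.Size([2, 4, 3])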
def unflatten_satisfies(old_satisfies):
""" Convert satisfies from v2 to v1 """
new_satisfies = {}
for element in old_satisfies:
new_element = {}
# Handle exsiting data
add_if_exists(
new_data=new_element,
old_data=element,
field='narrative'
)
add_if_exists(
new_data=new_element,
old_data=element,
field='implementation_status'
)
# Handle covered_by
references = transform_covered_by(element.get('covered_by', {}))
control_key = element['control_key']
standard_key = element['standard_key']
if references:
new_element['references'] = references
# Unflatten
if standard_key not in new_satisfies:
new_satisfies[standard_key] = {}
if control_key not in new_satisfies[standard_key]:
new_satisfies[standard_key][control_key] = new_element
return new_satisfies | aba5e1f8d327b4e5d24b995068f8746bcebc9082 | 5,770 |
def require_backend(required_backend):
"""
Raise ``SkipTest`` unless the functional test configuration has
``required_backend``.
:param unicode required_backend: The name of the required backend.
:returns: A function decorator.
"""
def decorator(undecorated_object):
@wraps(undecorated_object)
def wrapper(*args, **kwargs):
config = get_blockdevice_config()
configured_backend = config.pop('backend')
skipper = skipUnless(
configured_backend == required_backend,
'The backend in the supplied configuration '
'is not suitable for this test. '
'Found: {!r}. Required: {!r}.'.format(
configured_backend, required_backend
)
)
decorated_object = skipper(undecorated_object)
result = decorated_object(*args, **kwargs)
return result
return wrapper
return decorator | 859ca429466962ebba30559637691daf04940381 | 5,771 |
import os
def clip(src, shp):
"""
:param src: shapefile class with karst polygons. sinks.shp in db.
:param shp: shapefile class with basin boundary.
:return: shapefile output and class with karst.
"""
driver = ogr.GetDriverByName("ESRI Shapefile")
src_ds = driver.Open(src.path, 0)
src_layer = src_ds.GetLayer()
clip_ds = driver.Open(shp.path, 0)
clip_layer = clip_ds.GetLayer()
clip_prj = clip_layer.GetSpatialRef()
src_prj = src_layer.GetSpatialRef()
if src.prj4 != shp.prj4:
to_fill = ogr.GetDriverByName('Memory')
ds = to_fill.CreateDataSource("project")
out_layer = ds.CreateLayer('poly', src_prj, ogr.wkbPolygon)
feature = shp.lyr.GetFeature(0)
transform = osr.CoordinateTransformation(clip_prj, src_prj)
transformed = feature.GetGeometryRef()
transformed.Transform(transform)
geom = ogr.CreateGeometryFromWkb(transformed.ExportToWkb())
defn = out_layer.GetLayerDefn()
feat = ogr.Feature(defn)
feat.SetGeometry(geom)
out_layer.CreateFeature(feat.Clone())
clip_layer = out_layer
srs = osr.SpatialReference()
srs.ImportFromProj4(src.prj4)
out_path = shp.path[:-4] + '_karst.shp'
if os.path.exists(out_path):
driver.DeleteDataSource(out_path)
out_ds = driver.CreateDataSource(out_path)
out_layer = out_ds.CreateLayer('FINAL', srs=srs, geom_type=ogr.wkbMultiPolygon)
ogr.Layer.Clip(src_layer, clip_layer, out_layer)
out_ds = None
karstshp = dbShp(path=out_path)
return karstshp | 427512487b73dd77ac420418bcace2b7693ca9bc | 5,772 |
def fixed_data(input_df, level, db_name):
"""修复日期、股票代码、数量单位及规范列名称"""
# 避免原地修改
df = input_df.copy()
df = _special_fix(df, level, db_name)
df = _fix_code(df)
df = _fix_date(df)
df = _fix_num_unit(df)
df = _fix_col_name(df)
return df | 9a56115c210403a01d5ce39ec6596d217a8d4cd9 | 5,773 |
def get_nsg_e(ocp: AcadosOcp):
""" number of slack variables for linear constraints on terminal state and controls """
return int(ocp.constraints.idxsg_e.shape[0]) | 0e69fc188dd7812748cf5b173b7cc187a187b125 | 5,774 |
import numpy as np
def generate_test_samples(y, input_seq_len, output_seq_len):
"""
Generate all the test samples at one time
    :param y: array of time-series values; windows are taken along axis 0
    :param input_seq_len: length of the input window
    :param output_seq_len: length of the output window
    :return: array of all overlapping windows of length input_seq_len + output_seq_len
"""
total_samples = y.shape[0]
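    # Build every overlapping window of length input_seq_len + output_seq_len,
    # one window starting at each valid offset.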
input_batch_idxs = [list(range(i, i + input_seq_len+output_seq_len)) for i in
range((total_samples - input_seq_len - output_seq_len+1))]
input_seq = np.take(y, input_batch_idxs, axis=0)
return input_seq | cc849695598ac77b85e0209439ca8034844968fa | 5,775 |
import re
from bs4 import BeautifulSoup
import cfscrape
def get_tv_torrent_torrentz( name, maxnum = 10, verify = True ):
"""
Returns a :py:class:`tuple` of candidate episode Magnet links found using the Torrentz_ torrent service and the string ``"SUCCESS"``, if successful.
:param str name: the episode string on which to search.
:param int maxnum: optional argument, the maximum number of magnet links to return. Default is 10. Must be :math:`\ge 5`.
:param bool verify: optional argument, whether to verify SSL connections. Default is ``True``.
:returns: if successful, then returns a two member :py:class:`tuple` the first member is a :py:class:`list` of elements that match the searched episode, ordered from *most* seeds and leechers to least. The second element is the string ``"SUCCESS"``. The keys in each element of the list are,
* ``title`` is the name of the candidate episode to download.
* ``seeders`` is the number of seeds for this Magnet link.
* ``leechers`` is the number of leeches for this Magnet link.
* ``link`` is the Magnet URI link.
If this is unsuccessful, then returns an error :py:class:`tuple` of the form returned by :py:meth:`return_error_raw <howdy.core.return_error_raw>`.
:rtype: tuple
.. warning:: As of |date|, I cannot get it to work when giving it valid episode searches, such as ``"The Simpsons S31E01"``. See :numref:`table_working_tvtorrents`.
.. _Torrentz: https://en.wikipedia.org/wiki/Torrentz
"""
names_of_trackers = map(lambda tracker: tracker.replace(':', '%3A').replace('/', '%2F'), [
'udp://tracker.opentrackr.org:1337/announce',
'udp://open.demonii.com:1337',
'udp://tracker.pomf.se:80/announce',
'udp://torrent.gresille.org:80/announce',
'udp://11.rarbg.com/announce',
'udp://11.rarbg.com:80/announce',
'udp://open.demonii.com:1337/announce',
'udp://tracker.openbittorrent.com:80',
'http://tracker.ex.ua:80/announce',
'http://tracker.ex.ua/announce',
'http://bt.careland.com.cn:6969/announce',
'udp://glotorrents.pw:6969/announce'
])
tracklist = ''.join(map(lambda tracker: '&tr=%s' % tracker, names_of_trackers ) )
#
def try_int( candidate, default_value=0):
"""
Try to convert ``candidate`` to int, or return the ``default_value``.
:param candidate: The value to convert to int
:param default_value: The value to return if the conversion fails
:return: ``candidate`` as int, or ``default_value`` if the conversion fails
"""
try:
return int(candidate)
except (ValueError, TypeError):
return default_value
def _split_description(description):
match = re.findall(r'[0-9]+', description)
return int(match[0]) * 1024 ** 2, int(match[1]), int(match[2])
#
url = 'https://torrentz2.eu/feed'
search_params = {'f': name }
scraper = cfscrape.create_scraper( )
response = scraper.get( url, params = search_params, verify = verify )
if response.status_code != 200:
return return_error_raw( 'FAILURE, request for %s did not work.' % name )
if not response.content.startswith(b'<?xml'):
return return_error_raw( 'ERROR, request content is not a valid XML block.' )
html = BeautifulSoup( response.content, 'lxml' )
items = []
for item in html('item'):
if item.category and 'tv' not in item.category.get_text(strip=True).lower():
continue
title = item.title.get_text(strip=True)
t_hash = item.guid.get_text(strip=True).rsplit('/', 1)[-1]
if not all([title, t_hash]):
continue
download_url = "magnet:?xt=urn:btih:" + t_hash + "&dn=" + '+'.join(title.split()) + tracklist
torrent_size, seeders, leechers = _split_description(item.find('description').text)
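        # Skip results whose fuzzy-match score against the requested episode is below 80.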
if get_maximum_matchval( title, name ) < 80: continue
myitem = {'title': title, 'link': download_url, 'seeders': seeders,
'leechers': leechers }
items.append(myitem)
if len( items ) == 0:
return return_error_raw(
'Failure, no tv shows or series satisfying criteria for getting %s.' % name)
items.sort(key=lambda d: try_int(d.get('seeders', 0)) +
try_int(d.get('leechers')), reverse=True)
items = items[:maxnum]
return items, 'SUCCESS' | 48d67a36b3736c26d188269cc3308b7ecdd1ecb4 | 5,776 |
def errfunc(p, x, y, numpoly, numharm):
""" function to calc the difference between input values and function """
return y - fitFunc(p, x, numpoly, numharm) | 1b075b08668656dcf2a395545d4af2f5ff36508f | 5,777 |
from nltk.tokenize import word_tokenize
def create_token_column(col):
"""
Creates a cleaned and tokenised column
based on a sentence column in a dataframe
"""
# Convert it to lowercase
col = col.str.lower()
# Remove all non-alphanumeric characters
col = col.replace(r"\W", " ", regex=True)
# Collapse repeated spaces
col = col.replace(r"\s{2,}", " ", regex=True).str.strip()
# Split the strings into tokens
col = col.apply(word_tokenize)
# Lemmatise the column
col = lemmatise(col)
# Remove boring words
col = remove_simple(col)
# Rejoin the token lists into strings
col = col.apply(lambda x: " ".join(x))
# Return the final, cleaned version
return col | 71f798e176ae3a0c407e5c9cae50d699cb99db4b | 5,778 |
def get_browser(request):
    """
    Get the browser name from the request's User-Agent header.
    :param request: HTTP request object
    :return: browser information from the parsed user agent
"""
ua_string = request.META['HTTP_USER_AGENT']
user_agent = parse(ua_string)
return user_agent.get_browser() | 3cc6322baf3969e8d1936ccf8bd4f3d6bb423a5f | 5,779 |
import numpy as np
def predict(X, centroids, ccov, mc):
"""Predict the entries in X, which contains NaNs.
Parameters
----------
X : np array
        2d np array containing the inputs. Targets are specified with numpy NaNs.
The NaNs will be replaced with the most probable result according to the
GMM model provided.
centroids : list
List of cluster centers - [ [x1,y1,..],..,[xN, yN,..] ]
ccov : list
List of cluster co-variances DxD matrices
mc : list
Mixing cofficients for each cluster (must sum to one) by default equal
for each cluster.
Returns
-------
    variance_list : list
        List of conditional variances, one per input row.
"""
samples, D = X.shape
variance_list = []
for i in range(samples):
row = X[i, :]
targets = np.isnan(row)
num_targets = np.sum(targets)
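        # Condition the mixture on the observed entries of this row to get the
        # distribution over its missing (NaN) entries.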
cen_cond, cov_cond, mc_cond = cond_dist(row, centroids, ccov, mc)
X[i, targets] = np.zeros(np.sum(targets))
vara = np.zeros((num_targets, num_targets))
varb = np.zeros((num_targets, num_targets))
for j in range(len(cen_cond)):
X[i,targets] = X[i,targets] + (cen_cond[j]*mc_cond[j])
vara = vara + mc_cond[j] * \
(np.dot(cen_cond[j], cen_cond[j]) + cov_cond[j])
varb = varb + mc_cond[j] * cen_cond[j]
variance_list.append(vara - np.dot(varb, varb))
return variance_list | 043046623022346dcda9383fa416cffb59875b30 | 5,780 |
import cv2
import numpy as np
def rotate_image(img, angle):
""" Rotate an image around its center
# Arguments
img: image to be rotated (np array)
angle: angle of rotation
returns: rotated image
"""
image_center = tuple(np.array(img.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
transformed_image = cv2.warpAffine(img, rot_mat, img.shape[1::-1], flags=cv2.INTER_LINEAR)
return transformed_image | 649cdf9870ac31bd6bb36708417f0d9e6f0e7214 | 5,781 |
def standardize_1d(self, func, *args, autoformat=None, **kwargs):
"""
Interpret positional arguments for the "1D" plotting methods so usage is
consistent. Positional arguments are standardized as follows:
* If a 2D array is passed, the corresponding plot command is called for
each column of data (except for ``boxplot`` and ``violinplot``, in which
case each column is interpreted as a distribution).
* If *x* and *y* or *latitude* and *longitude* coordinates were not
provided, and a `~pandas.DataFrame` or `~xarray.DataArray`, we
try to infer them from the metadata. Otherwise,
``np.arange(0, data.shape[0])`` is used.
Parameters
----------
%(standardize.autoformat)s
See also
--------
cycle_changer
Note
----
This function wraps {methods}
"""
# Sanitize input
# TODO: Add exceptions for methods other than 'hist'?
name = func.__name__
autoformat = _not_none(autoformat, rc['autoformat'])
_load_objects()
if not args:
return func(self, *args, **kwargs)
elif len(args) == 1:
x = None
y, *args = args
elif len(args) <= 4: # max signature is x, y, z, color
x, y, *args = args
else:
raise ValueError(
                f'{name}() takes up to 4 positional arguments but {len(args)} were given.'
)
vert = kwargs.get('vert', None)
if vert is not None:
orientation = ('vertical' if vert else 'horizontal')
else:
orientation = kwargs.get('orientation', 'vertical')
# Iterate through list of ys that we assume are identical
# Standardize based on the first y input
if len(args) >= 1 and 'fill_between' in name:
ys, args = (y, args[0]), args[1:]
else:
ys = (y,)
ys = [_to_arraylike(y) for y in ys]
# Auto x coords
y = ys[0] # test the first y input
if x is None:
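        # Use axis=1 when inferring coordinates for distribution-style plots
        # (hist/boxplot/violinplot or when means/medians are requested),
        # otherwise axis=0.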
axis = int(
name in ('hist', 'boxplot', 'violinplot')
or any(kwargs.get(s, None) for s in ('means', 'medians'))
)
x, _ = _axis_labels_title(y, axis=axis)
x = _to_arraylike(x)
if x.ndim != 1:
raise ValueError(
f'x coordinates must be 1-dimensional, but got {x.ndim}.'
)
# Auto formatting
x_index = None # index version of 'x'
if not hasattr(self, 'projection'):
# First handle string-type x-coordinates
kw = {}
xname = 'y' if orientation == 'horizontal' else 'x'
yname = 'x' if xname == 'y' else 'y'
if _is_string(x):
if name in ('hist',):
kwargs.setdefault('labels', list(x))
else:
x_index = np.arange(len(x))
kw[xname + 'locator'] = mticker.FixedLocator(x_index)
kw[xname + 'formatter'] = mticker.IndexFormatter(x)
kw[xname + 'minorlocator'] = mticker.NullLocator()
if name == 'boxplot': # otherwise IndexFormatter is overridden
kwargs['labels'] = x
# Next handle labels if 'autoformat' is on
# NOTE: Do not overwrite existing labels!
if autoformat:
# Ylabel
y, label = _axis_labels_title(y)
iname = xname if name in ('hist',) else yname
if label and not getattr(self, f'get_{iname}label')():
# For histograms, this label is used for *x* coordinates
kw[iname + 'label'] = label
if name not in ('hist',):
# Xlabel
x, label = _axis_labels_title(x)
if label and not getattr(self, f'get_{xname}label')():
kw[xname + 'label'] = label
# Reversed axis
if name not in ('scatter',):
if x_index is None and len(x) > 1 and x[1] < x[0]:
kw[xname + 'reverse'] = True
        # Apply formatting
if kw:
self.format(**kw)
# Standardize args
if x_index is not None:
x = x_index
if name in ('boxplot', 'violinplot'):
ys = [_to_ndarray(yi) for yi in ys] # store naked array
kwargs['positions'] = x
    # Basemap shifts x coordinates without shifting y; we fix this here.
if getattr(self, 'name', '') == 'basemap' and kwargs.get('latlon', None):
ix, iys = x, []
xmin, xmax = self.projection.lonmin, self.projection.lonmax
for y in ys:
# Ensure data is monotonic and falls within map bounds
ix, iy = _enforce_bounds(*_fix_latlon(x, y), xmin, xmax)
iys.append(iy)
x, ys = ix, iys
# WARNING: For some functions, e.g. boxplot and violinplot, we *require*
# cycle_changer is also applied so it can strip 'x' input.
with rc.context(autoformat=autoformat):
return func(self, x, *ys, *args, **kwargs) | 332bb996cdd6d9991b64c42067110ec11ecb358b | 5,782 |
def build_transformer_crf_model(config):
"""
"""
src_vocab_size = config["src_vocab_size"]
src_max_len = config["src_max_len"]
n_heads = config["n_heads"]
d_model = config["d_model"]
d_ff = config["d_ff"]
d_qk = config.get("d_qk", d_model//n_heads)
d_v = config.get("d_v", d_model//n_heads)
n_enc_layers = config["n_enc_layers"]
dropout = config.get("dropout", 0)
n_labels = config["n_labels"]
share_layer_params = config.get("share_layer_params", False)
n_share_across_layers = config.get("n_share_across_layers", 1)
embedding_size = config.get("embedding_size", None)
use_pre_norm = config.get("use_pre_norm", True)
activation = config.get("activation", "relu")
scale_embedding = config.get("scale_embedding", False)
transformer = TransformerCRF(config["symbol2id"],
src_vocab_size,
src_max_len,
n_heads,
d_model,
d_ff,
d_qk,
d_v,
n_enc_layers,
dropout,
n_labels,
embedding_size,
share_layer_params,
n_share_across_layers,
use_pre_norm,
activation,
scale_embedding)
return transformer | e95e0ff30b450c3e55c4a38762cb417c8cbea5a5 | 5,783 |
def frac_mole_to_weight(nfrac, MM):
"""
Args:
nfrac(np.array): mole fraction of each compound
MM(np.array): molar mass of each compound
"""
return nfrac * MM / (nfrac * MM).sum() | 8e9fce630e3bf4efbd05956bea3708c5b7958d11 | 5,784 |
import datetime
from pytz import timezone
def get_close_hour_local():
"""
gets closing hour in local machine time (4 pm Eastern)
"""
eastern_tz = timezone('US/Eastern')
eastern_close = datetime.datetime(year=2018, month=6, day=29, hour=16)
eastern_close = eastern_tz.localize(eastern_close)
return str(eastern_close.astimezone().hour) | 9a0b1256864e028a6cccda7465da0f0e4cc3a009 | 5,785 |
def parse_structure(node):
"""Turn a collapsed node in an OverlayGraph into a heirchaical grpah structure."""
if node is None:
return None
structure = node.sub_structure
if structure is None:
return node.name
elif structure.structure_type == "Sequence":
return {"Sequence" : [parse_structure(n) for n in structure.structure["sequence"]]}
elif structure.structure_type == "HeadBranch":
return {"Sequence" : [
{"Branch" : [parse_structure(n) for n in structure.structure["branches"]] },
parse_structure(structure.structure["head"])
]}
elif structure.structure_type == "TailBranch":
return {"Sequence" : [
parse_structure(structure.structure["tail"]),
{"Branch" : [parse_structure(n) for n in structure.structure["branches"]] },
]}
else:
data = {}
for k in structure.structure:
if isinstance(structure.structure[k], list):
data[k] = [parse_structure(n) for n in structure.structure[k]]
else:
data[k] = parse_structure(structure.structure[k])
return {structure.structure_type : data} | f9374ff9548789d5bf9b49db11083ed7a15debab | 5,786 |
import torch
import torch.nn.functional as F
def softmax_kl_loss(input_logits, target_logits, sigmoid=False):
"""Takes softmax on both sides and returns KL divergence
Note:
- Returns the sum over all examples. Divide by the batch size afterwards
if you want the mean.
- Sends gradients to inputs but not the targets.
"""
assert input_logits.size() == target_logits.size()
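    # With sigmoid=True the logits are treated element-wise (binary case);
    # otherwise a categorical softmax over dim=1 is used.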
if sigmoid:
input_log_softmax = torch.log(torch.sigmoid(input_logits))
target_softmax = torch.sigmoid(target_logits)
else:
input_log_softmax = F.log_softmax(input_logits, dim=1)
target_softmax = F.softmax(target_logits, dim=1)
# return F.kl_div(input_log_softmax, target_softmax)
kl_div = F.kl_div(input_log_softmax, target_softmax, reduction='mean')
# mean_kl_div = torch.mean(0.2*kl_div[:,0,...]+0.8*kl_div[:,1,...])
return kl_div | 8faaee09947dca5977744b3f9c659ad24d3377e8 | 5,787 |
def cut_rod_top_down_cache(p, n):
"""
Only difference from book is creating the array to n+1 since range doesn't
include the end bound.
"""
r = [-100000 for i in range(n + 1)]
return cut_rod_top_down_cache_helper(p, n, r) | 36b35ec560fea005ae49950cf63f0dc4f787d8d0 | 5,788 |
def column_ids_to_names(convert_table, sharepoint_row):
""" Replace the column ID used by SharePoint by their column names for use in DSS"""
return {convert_table[key]: value for key, value in sharepoint_row.items() if key in convert_table} | 6ae1474823b0459f4cf3b10917286f709ddea520 | 5,789 |
def recursive_olsresiduals(res, skip=None, lamda=0.0, alpha=0.95,
order_by=None):
"""
Calculate recursive ols with residuals and Cusum test statistic
Parameters
----------
res : RegressionResults
Results from estimation of a regression model.
skip : int, default None
The number of observations to use for initial OLS, if None then skip is
set equal to the number of regressors (columns in exog).
lamda : float, default 0.0
The weight for Ridge correction to initial (X'X)^{-1}.
alpha : {0.90, 0.95, 0.99}, default 0.95
Confidence level of test, currently only two values supported,
used for confidence interval in cusum graph.
order_by : array_like, default None
Integer array specifying the order of the residuals. If not provided,
the order of the residuals is not changed. If provided, must have
the same number of observations as the endogenous variable.
Returns
-------
rresid : ndarray
The recursive ols residuals.
rparams : ndarray
The recursive ols parameter estimates.
rypred : ndarray
The recursive prediction of endogenous variable.
rresid_standardized : ndarray
The recursive residuals standardized so that N(0,sigma2) distributed,
where sigma2 is the error variance.
rresid_scaled : ndarray
The recursive residuals normalize so that N(0,1) distributed.
rcusum : ndarray
The cumulative residuals for cusum test.
rcusumci : ndarray
The confidence interval for cusum test using a size of alpha.
Notes
-----
    It produces the same recursive residuals as the alternative implementation.
    This version updates the inverse of the X'X matrix directly and therefore does
    not require a matrix inversion during updating; it looks efficient but has not
    been timed.
Confidence interval in Greene and Brown, Durbin and Evans is the same as
in Ploberger after a little bit of algebra.
References
----------
jplv to check formulas, follows Harvey
BigJudge 5.5.2b for formula for inverse(X'X) updating
Greene section 7.5.2
Brown, R. L., J. Durbin, and J. M. Evans. “Techniques for Testing the
Constancy of Regression Relationships over Time.”
Journal of the Royal Statistical Society. Series B (Methodological) 37,
no. 2 (1975): 149-192.
"""
y = res.model.endog
x = res.model.exog
order_by = array_like(order_by, "order_by", dtype="int", optional=True,
ndim=1, shape=(y.shape[0],))
    # initialize with skip observations
if order_by is not None:
x = x[order_by]
y = y[order_by]
nobs, nvars = x.shape
if skip is None:
skip = nvars
rparams = np.nan * np.zeros((nobs, nvars))
rresid = np.nan * np.zeros(nobs)
rypred = np.nan * np.zeros(nobs)
rvarraw = np.nan * np.zeros(nobs)
x0 = x[:skip]
if np.linalg.matrix_rank(x0) < x0.shape[1]:
err_msg = """\
"The initial regressor matrix, x[:skip], issingular. You must use a value of
skip large enough to ensure that the first OLS estimator is well-defined.
"""
raise ValueError(err_msg)
y0 = y[:skip]
# add Ridge to start (not in jplv)
xtxi = np.linalg.inv(np.dot(x0.T, x0) + lamda * np.eye(nvars))
xty = np.dot(x0.T, y0) # xi * y #np.dot(xi, y)
beta = np.dot(xtxi, xty)
rparams[skip - 1] = beta
yipred = np.dot(x[skip - 1], beta)
rypred[skip - 1] = yipred
rresid[skip - 1] = y[skip - 1] - yipred
rvarraw[skip - 1] = 1 + np.dot(x[skip - 1], np.dot(xtxi, x[skip - 1]))
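    # Process the remaining observations one at a time, recording the one-step-ahead
    # prediction error before updating beta and inverse(X'X).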
for i in range(skip, nobs):
xi = x[i:i + 1, :]
yi = y[i]
# get prediction error with previous beta
yipred = np.dot(xi, beta)
rypred[i] = yipred
residi = yi - yipred
rresid[i] = residi
# update beta and inverse(X'X)
tmp = np.dot(xtxi, xi.T)
ft = 1 + np.dot(xi, tmp)
xtxi = xtxi - np.dot(tmp, tmp.T) / ft # BigJudge equ 5.5.15
beta = beta + (tmp * residi / ft).ravel() # BigJudge equ 5.5.14
rparams[i] = beta
rvarraw[i] = ft
rresid_scaled = rresid / np.sqrt(rvarraw) # N(0,sigma2) distributed
nrr = nobs - skip
# sigma2 = rresid_scaled[skip-1:].var(ddof=1) #var or sum of squares ?
# Greene has var, jplv and Ploberger have sum of squares (Ass.:mean=0)
# Gretl uses: by reverse engineering matching their numbers
sigma2 = rresid_scaled[skip:].var(ddof=1)
rresid_standardized = rresid_scaled / np.sqrt(sigma2) # N(0,1) distributed
rcusum = rresid_standardized[skip - 1:].cumsum()
# confidence interval points in Greene p136 looks strange. Cleared up
# this assumes sum of independent standard normal, which does not take into
# account that we make many tests at the same time
if alpha == 0.90:
a = 0.850
elif alpha == 0.95:
a = 0.948
elif alpha == 0.99:
a = 1.143
else:
raise ValueError("alpha can only be 0.9, 0.95 or 0.99")
# following taken from Ploberger,
# crit = a * np.sqrt(nrr)
rcusumci = (a * np.sqrt(nrr) + 2 * a * np.arange(0, nobs - skip) / np.sqrt(
nrr)) * np.array([[-1.], [+1.]])
return (rresid, rparams, rypred, rresid_standardized, rresid_scaled,
rcusum, rcusumci) | 36e74d41920d3c176365c753bcf6cfae6e6cd20d | 5,790 |
def is_RationalField(x):
"""
Check to see if ``x`` is the rational field.
EXAMPLES::
sage: from sage.rings.rational_field import is_RationalField as is_RF
sage: is_RF(QQ)
True
sage: is_RF(ZZ)
False
"""
return isinstance(x, RationalField) | 7ab6b67eb666ae85456d48f1b79e180634252066 | 5,791 |
def add_nearest_neighbor_value_field(ptype, coord_name, sampled_field, registry):
"""
This adds a nearest-neighbor field, where values on the mesh are assigned
based on the nearest particle value found. This is useful, for instance,
    with Voronoi tessellations.
"""
field_name = ("deposit", f"{ptype}_nearest_{sampled_field}")
field_units = registry[ptype, sampled_field].units
unit_system = registry.ds.unit_system
def _nearest_value(field, data):
pos = data[ptype, coord_name]
pos = pos.convert_to_units("code_length")
value = data[ptype, sampled_field].in_base(unit_system.name)
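        # Deposit each particle's value onto the mesh using its single nearest
        # neighbor (nneighbors=1) via an octree search.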
rv = data.smooth(
pos, [value], method="nearest", create_octree=True, nneighbors=1
)
rv = data.apply_units(rv, field_units)
return rv
registry.add_field(
field_name,
sampling_type="cell",
function=_nearest_value,
validators=[ValidateSpatial(0)],
units=field_units,
)
return [field_name] | a0078a3baf1ff9525c4445225ca334609ded7e24 | 5,792 |
def create_directory_if_not_exists(dir_path):
""" Create directory path if it doesn't exist """
if not path_exists(dir_path):
mkdir_p(dir_path)
print('Creating {}'.format(dir_path))
return True
return False | 2eb62dbfb180e82296f8aba66e528bf749f357db | 5,793 |
from rdkit import Chem
def rdkit_smiles():
"""Assign the SMILES by RDKit on the new structure."""
new_smiles = ""
mol = Chem.MolFromMolFile("output.sdf")
    new_smiles = Chem.MolToSmiles(mol, isomericSmiles=False)
return new_smiles | 10de3f05bb4b6edefabe28134deae4371cc2cd2a | 5,794 |
import numpy as np
def load_ref_case(fname, name):
"""Loads PV power or Load from the reference cases
:param fname: Path to mat file
:type fname: string
:param name: Identifier for PV Power or Load
:type name: string
:return: Returns PV power or load from the reference case
:rtype: numpy array
"""
with open(fname, 'rb') as f:
a = np.load(f)
data = a[name]
return data | fc03fa8f9ef2070d2a6da741579f740fa85fa917 | 5,795 |
import uuid
def make_unique_id():
"""Make a new UniqueId."""
return uuid.uuid4()
# return UniqueId(uuid.uuid4()) | c7ab0e5242a954db75638b3193609d49f0097287 | 5,796 |
import gzip
def read_uni(filename):
"""
Read a '*.uni' file. Returns the header as a dictionary and the content as
a numpy-array.
"""
with gzip.open(filename, 'rb') as bytestream:
header = _read_uni_header(bytestream)
array = _read_uni_array(bytestream, header)
return header, array | c930d4dd8de9c5da10a4e31be6b987cc4f0f25ac | 5,797 |
import numpy as np
def _F(startmat, endmat):
"""Calculate the deformation tensor
to go from start to end
:startmat: ndarray
:endmat: ndarray
:returns: ndarray
"""
    F = np.dot(endmat, np.linalg.inv(startmat))
return F | 2a357d55e0f73c6f827c35ab72673f6b42875129 | 5,798 |
from typing import OrderedDict
from inspect import signature
import logging
def get_docstrings(target, functions):
""" Proceses functions in target module and prompts user for documentation if none exists.
:param target: Loaded target python module
:param functions: List of defined functions in target module
:returns: Dict containing raw comments entered by user
"""
new_docs = {}
for funcname, theclass in functions.items():
# Init dict for this function's params
func_docs = OrderedDict()
func_docs['description'] = input('Enter brief function description for {0}: '.format(funcname))
        if theclass == 'noclass':
            myfunc = getattr(target, funcname)
            if myfunc.__doc__ is None:
                sig = signature(myfunc)
logging.info('Ingesting doc for {0} with signature {1}'.format(funcname, str(sig)))
params = sig.parameters
for p in params:
p = 'param:'+p
func_docs[p] = input('Enter type and description for parameter {0} in {1}: '.format(p, funcname))
# Ingest return value doc
ret_doc = input('Enter return value description: ')
func_docs['returns'] = ret_doc
# Place param comment dict into return new_docs dict
new_docs[funcname] = func_docs
else:
myfunc = getattr(theclass, funcname)
if myfunc.__doc__ is None:
sig = signature(myfunc)
logging.info('Ingesting doc for {0} with signature {1}'.format(funcname, str(sig)))
params = sig.parameters
for p in params:
p = 'param:'+p
func_docs[p] = input('Enter type and description for parameter {0} in {1}: '.format(p, funcname))
# Ingest return value doc
ret_doc = input('Enter return value description: ')
func_docs['returns'] = ret_doc
# Place param comment dict into return new_docs dict
new_docs[funcname] = func_docs
return new_docs | 98bc2e4267415e74b70a5342115d4dcabfbefcba | 5,799 |