content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---|
import click
def parse_rangelist(rli):
"""Parse a range list into a list of integers"""
try:
mylist = []
for nidrange in rli.split(","):
startstr, sep, endstr = nidrange.partition("-")
start = int(startstr, 0)
if sep:
end = int(endstr, 0)
if end < start:
mylist.extend(range(start, end - 1, -1))
else:
mylist.extend(range(start, end + 1))
else:
mylist.append(start)
except ValueError:
# pylint: disable=raise-missing-from
raise click.ClickException("Invalid range list %s" % rli)
return mylist
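# Minimal usage sketch (assumes click is installed, since invalid input raises
# click.ClickException); int(x, 0) also accepts hex/octal literals.
assert parse_rangelist("1-3,7,10-8") == [1, 2, 3, 7, 10, 9, 8]
assert parse_rangelist("0x10-0x12") == [16, 17, 18]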
|
321496a1170b81d02b8378d687d8ce6d6295bff6
| 32,020 |
def transcript_segments(location_descriptors, gene_descriptors):
"""Provide possible transcript_segment input."""
return [
{
"transcript": "refseq:NM_152263.3",
"exon_start": 1,
"exon_start_offset": -9,
"exon_end": 8,
"exon_end_offset": 7,
"gene_descriptor": gene_descriptors[0],
"component_genomic_start": location_descriptors[2],
"component_genomic_end": location_descriptors[3]
},
{
"component_type": "transcript_segment",
"transcript": "refseq:NM_034348.3",
"exon_start": 1,
"exon_end": 8,
"gene_descriptor": gene_descriptors[3],
"component_genomic_start": location_descriptors[0],
"component_genomic_end": location_descriptors[1]
},
{
"component_type": "transcript_segment",
"transcript": "refseq:NM_938439.4",
"exon_start": 7,
"exon_end": 14,
"exon_end_offset": -5,
"gene_descriptor": gene_descriptors[4],
"component_genomic_start": location_descriptors[0],
"component_genomic_end": location_descriptors[1]
},
{
"component_type": "transcript_segment",
"transcript": "refseq:NM_938439.4",
"exon_start": 7,
"gene_descriptor": gene_descriptors[4],
"component_genomic_start": location_descriptors[0]
}
]
|
3ca9041ff278dcd19432b6d314b9c01de6be1983
| 32,022 |
def perform_data_filtering_q2(data):
"""
Takes the original DataFrame.
Returns the altered DataFrame necessary for Q2.
"""
# redoing the dataframe columns based on different values
df = data
diseased = df['num'] != 0
df['num'] = np.where(diseased, 'diseased', 'healthy')
males = df['sex'] == 1
df['sex'] = np.where(males, 'male', 'female')
return df
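# Minimal usage sketch; assumes pandas and numpy, which the function body
# already relies on. The toy frame below is hypothetical.
import numpy as np
import pandas as pd

toy = pd.DataFrame({"num": [0, 2, 1], "sex": [1, 0, 1]})
filtered = perform_data_filtering_q2(toy)
# filtered["num"] -> ['healthy', 'diseased', 'diseased']
# filtered["sex"] -> ['male', 'female', 'male']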
|
2c8943bda66722b70a5dd25cb5a7c7473e40e67c
| 32,023 |
def player_with_name_and_value(source):
"""
source: pn.widgets.DiscretePlayer()
target: consists of source player's name, value and player itself
With pn.widgets.DiscretePlayer, we don't get name and
value updates in textual form. This method is useful
in case we want name and continuous value update.
"""
mark = pn.pane.Markdown(f'{source.value}')
def callback(*events):
for event in events:
if event.name == 'value':
mark.object = str(event.new)
source.param.watch(callback, ['value'], onlychanged=False)
target = pn.Column(pn.Row(source.name, mark), source)
return target
|
e7d415d798f9c6aefb203f861b08c9477dde32d7
| 32,024 |
def _cached_diff(expression, var):
"""
Derive expression with respect to a single variable.
:param expression: an expression to derive
:type expression: :class:`~sympy.Expr`
:param var: a variable
:type var: :class:`~sympy.Symbol`
:return: the derived expression
:type: :class:`~sympy.Expr`
"""
return sp.Derivative(expression, var, evaluate=True)
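# Brief usage sketch; assumes `sp` is sympy, as the function body implies.
import sympy as sp

x = sp.Symbol("x")
assert _cached_diff(x**3 + 2*x, x) == 3*x**2 + 2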
|
4a4206d327bee6f0c8168893e2bffbeaa23b9c14
| 32,025 |
def _get_mult_op_ ( klass1 , klass2 ) :
"""Get the proper multiplication operator
"""
t = klass1 , klass2
ops = _mult_ops_.get( t , None )
if ops : return ops ## RETURN
## try to load the operators
try :
ops = Ostap.Math.MultiplyOp ( klass1 , klass2 )
_mult_ops_ [ t ] = ops
return ops ## RETURN
except TypeError:
return None ## RETURN
return None ## RETURN
|
f7a000f697d4739894e1671e9cfeb23099e1ce4f
| 32,026 |
def datetime_into_columns(df, column, weekday = False, hour_minutes = False, from_type = 'object'):
"""
The function converts a column with a date from either int64 or object type
into separate columns with day - month - year
user can choose to add weekday - hour - minutes columns
Keyword arguments
df (Pandas DataFrame type)-- is the given dataframe
column (string) -- the chosen column to create new columns from
weekday (boolean) -- True if user wants new column with weekday value (default False)
hour_minutes (boolean) -- True if user wants two new columns with hour and minutes values (default False)
from_type (string) -- 'object' by default if original column type is object and
'int64' if original column type is int64
    return: the resulting dataframe with the new columns
"""
if from_type == 'int64':
column = pd.to_datetime(df[column].astype(str))
else:
column = pd.to_datetime(df[column])
datetime = pd.DataFrame(column)
datetime['day'] = column.dt.day
datetime['month'] = column.dt.month
datetime['year'] = column.dt.year
if weekday == True:
datetime['weekday'] = column.dt.weekday
if hour_minutes == True:
datetime['hour'] = column.dt.hour
datetime['minutes'] = column.dt.minute
df = pd.concat([df, datetime], axis = 1)
df = df.loc[:,~df.columns.duplicated(keep='last')]
return df
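# Usage sketch on a hypothetical frame; assumes pandas is imported as pd,
# which the function body already relies on.
import pandas as pd

sales = pd.DataFrame({"date": ["2021-03-01 10:30", "2021-03-02 11:45"], "qty": [3, 5]})
out = datetime_into_columns(sales, "date", weekday=True, hour_minutes=True)
# out keeps qty, replaces date with its parsed datetime form, and adds
# day, month, year, weekday, hour and minutes columns.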
|
e6178b33f113ef1430d0d8df3fe9b6c53d200e1e
| 32,027 |
def cis_codif_h1_moms(probe, starttime, endtime, sensitivity='high',
try_download=True):
"""
Load H+ moments from CIS instrument.
See https://caa.estec.esa.int/documents/UG/CAA_EST_UG_CIS_v35.pdf for more
information on the CIS data.
Parameters
----------
probe : string
Probe number. Must be '1', '2', '3', or '4'.
starttime : datetime
Interval start.
endtime : datetime
Interval end.
    sensitivity : string, 'high' or 'low', default: 'high'
Load high or low sensitivity
Returns
-------
data : DataFrame
Requested data.
"""
sensitivitydict = {'high': 'HS', 'low': 'LS'}
sensitivity = sensitivitydict[sensitivity]
endstr = '_CP_CIS-CODIF_' + sensitivity + '_H1_MOMENTS'
return _load(probe, starttime, endtime, 'peace', endstr[1:],
try_download=try_download)
|
e8f014196c2a634d406aaeb86a3130e6db59049f
| 32,028 |
def readlines(file_path):
    """Read lines from file_path; if the file is already loaded in a vim buffer, return that buffer."""
buffer = getbuffer(file_path)
if buffer and int(vim.eval('bufloaded(%d)' % buffer.number)):
return buffer
try:
with open(file_path, 'r') as fo:
# we are not decoding: since we have to assume that files are in &encoding
# and vim stores buffers, variables, ... in &encoding.
return fo.read().splitlines()
except IOError:
return []
|
a4f367a00f90095a17f9eaf29eb150e7a5176045
| 32,029 |
def convert_range_image_to_point_cloud(
frame, range_images, camera_projections, range_image_top_pose, ri_indexes=(0, 1)
):
"""Convert range images to point cloud. modified from
https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/utils/range_image_utils.py#L612
Args:
frame: open dataset frame
range_images: A dict of {laser_name, [range_image_first_return,
range_image_second_return]}.
camera_projections: A dict of {laser_name,
[camera_projection_from_first_return,
camera_projection_from_second_return]}.
range_image_top_pose: range image pixel pose for top lidar.
ri_indexes: 0 for the first return, 1 for the second return.
Returns:
points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
cp_points: {[N, 6]} list of camera projections of length 5
(number of lidars).
"""
tf = tensorflow
calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name)
ret_dict = defaultdict(list)
frame_pose = tf.convert_to_tensor(value=np.reshape(np.array(frame.pose.transform), [4, 4]))
# [H, W, 6]
range_image_top_pose_tensor = tf.reshape(
tf.convert_to_tensor(value=range_image_top_pose.data), range_image_top_pose.shape.dims
)
# [H, W, 3, 3]
range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
range_image_top_pose_tensor[..., 0],
range_image_top_pose_tensor[..., 1],
range_image_top_pose_tensor[..., 2],
)
range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:]
range_image_top_pose_tensor = transform_utils.get_transform(
range_image_top_pose_tensor_rotation, range_image_top_pose_tensor_translation
)
for c in calibrations:
for ri_index in ri_indexes:
range_image = range_images[c.name][ri_index]
if len(c.beam_inclinations) == 0:
beam_inclinations = range_image_utils.compute_inclination(
tf.constant([c.beam_inclination_min, c.beam_inclination_max]),
height=range_image.shape.dims[0],
)
else:
beam_inclinations = tf.constant(c.beam_inclinations)
beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
range_image_tensor = tf.reshape(
tf.convert_to_tensor(value=range_image.data), range_image.shape.dims
)
pixel_pose_local = None
frame_pose_local = None
if c.name == dataset_pb2.LaserName.TOP:
pixel_pose_local = range_image_top_pose_tensor
pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
frame_pose_local = tf.expand_dims(frame_pose, axis=0)
range_image_mask = range_image_tensor[..., 0] > 0
# No Label Zone
if FILTER_NO_LABEL_ZONE_POINTS:
nlz_mask = range_image_tensor[..., 3] != 1.0 # 1.0: in NLZ
range_image_mask = range_image_mask & nlz_mask
range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
tf.expand_dims(range_image_tensor[..., 0], axis=0),
tf.expand_dims(extrinsic, axis=0),
tf.expand_dims(tf.convert_to_tensor(value=beam_inclinations), axis=0),
pixel_pose=pixel_pose_local,
frame_pose=frame_pose_local,
)
range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
points_tensor = tf.gather_nd(
range_image_cartesian, tf.compat.v1.where(range_image_mask)
)
ret_dict["points_{}_{}".format(c.name, ri_index)].append(points_tensor.numpy())
# Note: channel 1 is intensity
# https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/dataset.proto#L176
intensity_tensor = tf.gather_nd(range_image_tensor[..., 1], tf.where(range_image_mask))
ret_dict["intensity_{}_{}".format(c.name, ri_index)].append(intensity_tensor.numpy())
return ret_dict
|
af6a6af4cfcde6f3b600ffe0add8a3a0b0870067
| 32,031 |
from typing import Dict
from typing import Tuple
def get_counts(circ: MyCircuit, n_shots: int, seed: int) -> Dict[Tuple[int, ...], int]:
"""Helper method for tests to summarise the shot table from the simulator
:param circ: The circuit to simulate
:type circ: MyCircuit
:param n_shots: The number of samples to take
:type n_shots: int
:param seed: Seed for the random sampling
:type seed: int
:return: Map from readout array to the number of instances observed in the shot table
:rtype: Dict[Tuple[int, ...], int]
"""
sim = MySimulator(circ)
shots = sim.sample(n_shots=n_shots, seed=seed)
rows, freqs = np.unique(shots, axis=0, return_counts=True)
return {tuple(r): f for r, f in zip(rows, freqs)}
|
e681fdc02e4cf7a637eac6f1e2096d9276b39cc6
| 32,032 |
def length(
inputs: tf.Tensor,
axis: int = -1,
keepdims: bool = False,
epsilon: float = 1e-10,
name: str = None
) -> tf.Tensor:
"""
    Computes the vector length (2-norm) along the specified `axis` of the given tensor `inputs`.
    Optionally, an epsilon can be added to the squared norm before the square root is computed.
"""
with tf.name_scope(name, default_name="norm"):
if epsilon is None:
return tf.sqrt(tf.reduce_sum(tf.square(inputs), axis=axis, keepdims=keepdims))
else:
return tf.sqrt(tf.add(tf.reduce_sum(tf.square(inputs), axis=axis, keepdims=keepdims), epsilon))
|
a548110ec2d8e3512e73805aea690f22c7d50fe3
| 32,033 |
def class_label_matrix(labels, img_sizes, num_classes):
""" Computes the class label matrix of the training data. """
# Class label matrix
Y = list()
# Modeling the object detection problem as a binary classification problem (none, detection)
if num_classes == 2:
print('Modeling as a binary problem')
for sample in range(len(labels)):
# None
if len(labels[sample]) == 0:
Y.append(0)
# Detection (smoke or fire or both)
else:
Y.append(1)
# Modeling the object detection problem as a multiclass classification problem (none, fire, smoke)
if num_classes > 2:
print('Modeling as a multiclass problem')
# Pixels area per image
area = {'fire': 0, 'smoke': 0}
for sample in range(len(labels)):
# None
if len(labels[sample]) == 0:
Y.append(0)
# Detection
else:
# For each bounding box
for label in range(labels[sample].shape[0]):
# Class identifier
class_id = labels[sample][label][0]
# Normalized coordinates
xmin = labels[sample][label][1]
ymin = labels[sample][label][2]
xmax = labels[sample][label][3]
ymax = labels[sample][label][4]
# Image dimensions
height = img_sizes[sample][0]
width = img_sizes[sample][1]
# Coordinates without normalization
xmin, ymin, xmax, ymax = deconvert((width, height), (xmin, ymin, xmax, ymax))
# Sum the pixel areas according to the class
if class_id == 0:
area['smoke'] += (xmax - xmin) * (ymax - ymin)
else:
area['fire'] += (xmax - xmin) * (ymax - ymin)
# If the smoke pixel area is larger than the fire pixel area
if area['smoke'] > area['fire']:
Y.append(1)
# Otherwise
else:
Y.append(2)
# Resetting counters for the next image
area = area.fromkeys(area, 0)
# Convert a class vector (integers) to binary class matrix
Y = np.eye(num_classes, dtype = 'int')[Y]
# List to numpy array
Y = np.array(Y)
return Y
|
47efe5f8f76cac58d3afeaddb04d766ebdebf377
| 32,034 |
def default_lambda_consumer(env_id):
"""Create a default lambda consumer for the snapshot restore test."""
return st.consumer.LambdaConsumer(
metadata_provider=DictMetadataProvider(
CONFIG_DICT["measurements"],
SnapRestoreBaselinesProvider(env_id)
),
func=consume_output,
func_kwargs={})
|
ec7ec2ae6df4aaa7caf034537a5259b897755700
| 32,035 |
def thresholdPolyData(poly, attr, threshold, mode):
"""
Get the polydata after thresholding based on the input attribute
Args:
poly: vtk PolyData to apply threshold
        attr: attribute name of the cell/point data array
        mode: 'cell' to threshold a cell-data array, otherwise point data is used
threshold: (min, max)
Returns:
output: resulted vtk PolyData
"""
surface_thresh = vtk.vtkThreshold()
surface_thresh.SetInputData(poly)
surface_thresh.ThresholdBetween(*threshold)
if mode=='cell':
surface_thresh.SetInputArrayToProcess(0, 0, 0,
vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS, attr)
else:
surface_thresh.SetInputArrayToProcess(0, 0, 0,
vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, attr)
surface_thresh.Update()
surf_filter = vtk.vtkDataSetSurfaceFilter()
surf_filter.SetInputData(surface_thresh.GetOutput())
surf_filter.Update()
return surf_filter.GetOutput()
|
e4717b971c238d9c3a63a902db7eb91e2c630340
| 32,036 |
def TCh_GetNum(*args):
"""
TCh_GetNum(char const & Ch) -> int
Parameters:
Ch: char const &
"""
return _snap.TCh_GetNum(*args)
|
6caf9bcf71868604a6aedeceaba299a36a6bc62a
| 32,037 |
def writeFGSSPostageStampRequestById(outfile, requestName, results, xsize, ysize, psRequestType = 'byid', optionMask = 2049, imageType = 'warp', psJobType = 'stamp', skycell = 'null', email = '[email protected]', camera = 'gpc1', coordMask = 2):
"""writeFGSSPostageStampRequestById.
Args:
outfile:
requestName:
results:
xsize:
ysize:
psRequestType:
optionMask:
imageType:
psJobType:
skycell:
email:
camera:
coordMask:
"""
# "results" is the data set returned from the database of all the candidates. Need
# to construct a suitable query that contains the appropriate columns.
fileSuccessfullyWritten = False
hdu = pf.PrimaryHDU()
hdulist = pf.HDUList()
prihdr = hdu.header
prihdr.set('SIMPLE', True, 'file does conform to FITS standard')
prihdr.set('BITPIX', 16, comment='number of bits per data pixel')
prihdr.set('NAXIS', 0, comment='number of data axes')
prihdr.set('EXTEND', True, 'FITS dataset may contain extensions')
prihdr.add_comment(" FITS (Flexible Image Transport System) format is defined in 'Astronomy")
prihdr.add_comment(" and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H")
hdulist.append(hdu)
rownum = []
project = []
survey_name = []
ipp_release = []
job_type = []
option_mask = []
req_type = []
img_type = []
id = []
tess_id = []
component = []
coord_mask = []
center_x = [] # RA
center_y = [] # DEC
width = []
height = []
#label = []
data_group = []
reqfilt = []
mjd_min = []
mjd_max = []
run_type = []
fwhm_min = []
fwhm_max = []
comment = []
row = 1
# 2012-09-21 KWS Discovered that PyFITS3 doesn't allow implicit creation of
# double arrays from integer lists. Need to cast integers
# as floats.
for result in results:
rownum.append(row)
project.append(camera)
survey_name.append('null')
ipp_release.append('null')
job_type.append('stamp')
option_mask.append(optionMask) # Changed to 2049 for unconvolved stacks
req_type.append(psRequestType)
img_type.append(imageType) # Hard wired to warp for FGSS 3pi data
id.append(result["warp_id"]) # This should contain the warp ID as extracted from the GPC1 database
tess_id.append('RINGS.V3')
component.append(skycell)
coord_mask.append(coordMask)
center_x.append(float(result["ra_psf"]))
center_y.append(float(result["dec_psf"]))
width.append(float(xsize))
height.append(float(ysize))
#label.append('null')
data_group.append('null')
reqfilt.append('null')
mjd_min.append(0)
mjd_max.append(0)
run_type.append('null')
fwhm_min.append(0)
fwhm_max.append(0)
# Added IPP_IDET to list of columns selected
try:
if result["comment"]:
comment.append(result["comment"])
else:
comment.append('%s_%s_%s_%d_%s' % (str(result["id"]), result["tdate"], result["imageid"], result["ipp_idet"], "target")) # Hard wired "target" as image type
except KeyError as e:
comment.append('%s_%s_%s_%d_%s' % (str(result["id"]), result["tdate"], result["imageid"], result["ipp_idet"], "target")) # Hard wired "target" as image type
row = row + 1
# Create the FITS columns.
rownum_col = pf.Column(name='ROWNUM', format='J', array=rownum)
project_col = pf.Column(name='PROJECT', format='16A', array=project)
survey_name_col = pf.Column(name='SURVEY_NAME', format='16A', array=survey_name)
ipp_release_col = pf.Column(name='IPP_RELEASE', format='16A', array=ipp_release)
job_type_col = pf.Column(name='JOB_TYPE', format='16A', array=job_type)
option_mask_col = pf.Column(name='OPTION_MASK', format='J', array=option_mask)
req_type_col = pf.Column(name='REQ_TYPE', format='16A', array=req_type)
img_type_col = pf.Column(name='IMG_TYPE', format='16A', array=img_type)
id_col = pf.Column(name='ID', format='16A', array=id)
tess_id_col = pf.Column(name='TESS_ID', format='64A', array=tess_id)
component_col = pf.Column(name='COMPONENT', format='64A', array=component)
coord_mask_col = pf.Column(name='COORD_MASK', format='J', array=coord_mask)
center_x_col = pf.Column(name='CENTER_X', format='D', array=center_x)
center_y_col = pf.Column(name='CENTER_Y', format='D', array=center_y)
width_col = pf.Column(name='WIDTH', format='D', array=width)
height_col = pf.Column(name='HEIGHT', format='D', array=height)
#label_col = pf.Column(name='LABEL', format='64A', array=label)
data_group_col = pf.Column(name='DATA_GROUP', format='64A', array=data_group)
reqfilt_col = pf.Column(name='REQFILT', format='16A', array=reqfilt)
mjd_min_col = pf.Column(name='MJD_MIN', format='D', array=mjd_min)
mjd_max_col = pf.Column(name='MJD_MAX', format='D', array=mjd_max)
run_type_col = pf.Column(name='RUN_TYPE', format='16A', array=run_type)
fwhm_min_col = pf.Column(name='FWHM_MIN', format='D', array=fwhm_min)
fwhm_max_col = pf.Column(name='FWHM_MAX', format='D', array=fwhm_max)
comment_col = pf.Column(name='COMMENT', format='64A', array=comment)
cols=pf.ColDefs([rownum_col,
project_col,
survey_name_col,
ipp_release_col,
job_type_col,
option_mask_col,
req_type_col,
img_type_col,
id_col,
tess_id_col,
component_col,
coord_mask_col,
center_x_col,
center_y_col,
width_col,
height_col,
data_group_col,
reqfilt_col,
mjd_min_col,
mjd_max_col,
run_type_col,
fwhm_min_col,
fwhm_max_col,
comment_col])
tbhdu=pf.BinTableHDU.from_columns(cols)
# The from_columns method only available from PyFITS 3.3 onwards.
#tbhdu=pf.BinTableHDU.from_columns(cols)
hdulist.append(tbhdu)
exthdr = hdulist[1].header
exthdr.set('EXTNAME','PS1_PS_REQUEST','name of this binary table extension')
exthdr.set('REQ_NAME',requestName,'Postage Stamp request name')
# 2015-08-26 KWS Updated contents of the header for version 2
exthdr.set('EXTVER','2','Extension version')
exthdr.set('ACTION','PROCESS')
exthdr.set('EMAIL',email,'Email address of submitter')
hdulist.writeto(outfile, clobber=True)
fileSuccessfullyWritten = True
return fileSuccessfullyWritten
|
636284d46cbaced8d0609afe988148cbc3111d32
| 32,038 |
def reduce_sequence(sequence, desired_length):
"""Reduces a sequence to the desired length by removing some of its elements uniformly."""
if len(sequence) < desired_length:
raise RuntimeError('Cannot reduce sequence to longer length.')
    indexes = (N.arange(desired_length) * len(sequence)) // desired_length
return [sequence[i] for i in indexes]
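# Quick check of the intended behaviour; assumes numpy is imported as N,
# matching the function body.
import numpy as N

assert reduce_sequence(list(range(10)), 5) == [0, 2, 4, 6, 8]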
|
25f104abc666e26821436a42f5cb71b99b41a86c
| 32,039 |
def encode_label(text):
"""Encode text escapes for the static control and button labels
The ampersand (&) needs to be encoded as && for wx.StaticText
and wx.Button in order to keep it from signifying an accelerator.
"""
return text.replace("&", "&&")
|
b4402604f87f19dab9dbda4273798374ee1a38d8
| 32,040 |
def dash_table_from_data_frame(df: pd.DataFrame, *, id, **kwargs):
"""Returns a dash_table.DataTable that will render `df` in a simple HTML table."""
df_all_columns = df.reset_index()
return dash_table.DataTable(
id=id,
columns=[{"name": i, "id": i} for i in df_all_columns.columns],
cell_selectable=False,
data=df_all_columns.to_dict("records"),
editable=False,
page_action="native",
**kwargs,
)
|
813bf054f33a4dc15dfcff300414f60fd9cf2973
| 32,041 |
from warnings import filterwarnings
def calcRSI(df):
"""
Calculates RSI indicator
Read about RSI: https://www.investopedia.com/terms/r/rsi.asp
Args:
df : pandas.DataFrame()
dataframe of historical ticker data
Returns:
pandas.DataFrame()
dataframe of calculated RSI indicators + original data
"""
filterwarnings("ignore")
df["price_change"] = df["adjclose"].pct_change()
df["Upmove"] = df["price_change"].apply(lambda x: x if x > 0 else 0)
df["Downmove"] = df["price_change"].apply(lambda x: abs(x) if x < 0 else 0)
df["avg_Up"] = df["Upmove"].ewm(span=19).mean()
df["avg_Down"] = df["Downmove"].ewm(span=19).mean()
df = df.dropna()
df["RS"] = df["avg_Up"] / df["avg_Down"]
df["RSI"] = df["RS"].apply(lambda x: 100 - (100 / (x + 1)))
return df
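# Usage sketch on hypothetical price data; assumes pandas is available and the
# frame has an "adjclose" column, as the function expects.
import pandas as pd

prices = pd.DataFrame({"adjclose": [100, 101, 99, 102, 103, 101, 104, 105]})
with_rsi = calcRSI(prices)
# with_rsi["RSI"] holds values in [0, 100]; readings above ~70 are commonly read as overbought.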
|
4c2c76159473bf8b23e24cb02af00841977c7cd3
| 32,043 |
def test_c_py_compose_transforms_module():
"""
Test combining Python and C++ transforms
"""
ds.config.set_seed(0)
def test_config(arr, input_columns, output_cols, op_list):
data = ds.NumpySlicesDataset(arr, column_names=input_columns, shuffle=False)
data = data.map(operations=op_list, input_columns=input_columns, output_columns=output_cols,
column_order=output_cols)
res = []
for i in data.create_dict_iterator(output_numpy=True):
for col_name in output_cols:
res.append(i[col_name].tolist())
return res
arr = [1, 0]
assert test_config(arr, ["cols"], ["cols"],
[py_transforms.OneHotOp(2), c_transforms.Mask(c_transforms.Relational.EQ, 1)]) == \
[[[False, True]],
[[True, False]]]
assert test_config(arr, ["cols"], ["cols"],
[py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1)]) \
== [[[1, 1]], [[1, 1]]]
assert test_config(arr, ["cols"], ["cols"],
[py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1), (lambda x: x + x)]) \
== [[[2, 2]], [[2, 2]]]
assert test_config([[1, 3]], ["cols"], ["cols"],
[c_transforms.PadEnd([3], -1), (lambda x: x + x)]) \
== [[2, 6, -2]]
arr = ([[1]], [[3]])
assert test_config(arr, ["col0", "col1"], ["a"], [(lambda x, y: x + y), c_transforms.PadEnd([2], -1)]) == [[4, -1]]
|
e51191a48cc79bcac8cfe41508dd9e539da4645c
| 32,046 |
import struct
def add_header(input_array, codec, length, param):
"""Add the header to the appropriate array.
    :param input_array: the encoded array to add the header to
    :param codec: the codec being used
    :param length: the length of the decoded array
    :param param: the parameter to add to the header
    :return: the prepended encoded byte array"""
return struct.pack(">i", codec) + struct.pack(">i", length) + struct.pack(">i", param) + input_array
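# Quick demonstration of the 12-byte big-endian header (codec, length, param)
# prepended to the payload:
framed = add_header(b"\x01\x02\x03", codec=7, length=3, param=0)
assert struct.unpack(">iii", framed[:12]) == (7, 3, 0)
assert framed[12:] == b"\x01\x02\x03"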
|
228db86bb6eb9e3c7cc59cc48b67e443d46cc36d
| 32,047 |
import torch
import torch.nn.functional as F
def jaccard_loss(logits, true, eps=1e-7):
"""Computes the Jaccard loss, a.k.a the IoU loss.
    Note that PyTorch optimizers minimize a loss. In this
    case, we would like to maximize the Jaccard index, so we
    return one minus the Jaccard index as the loss.
Args:
true: a tensor of shape [B, H, W] or [B, 1, H, W].
logits: a tensor of shape [B, C, H, W]. Corresponds to
the raw output or logits of the model.
eps: added to the denominator for numerical stability.
Returns:
jacc_loss: the Jaccard loss.
"""
num_classes = logits.shape[1]
if num_classes == 1:
true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
true_1_hot_f = true_1_hot[:, 0:1, :, :]
true_1_hot_s = true_1_hot[:, 1:2, :, :]
true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
pos_prob = torch.sigmoid(logits)
neg_prob = 1 - pos_prob
probas = torch.cat([pos_prob, neg_prob], dim=1)
else:
true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
probas = F.softmax(logits, dim=1)
true_1_hot = true_1_hot.type(logits.type())
dims = (0,) + tuple(range(2, true.ndimension()))
intersection = torch.sum(probas * true_1_hot, dims)
cardinality = torch.sum(probas + true_1_hot, dims)
union = cardinality - intersection
jacc_loss = (intersection / (union + eps)).mean()
return (1 - jacc_loss)
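# Usage sketch with random tensors; torch and F are imported above.
logits = torch.randn(2, 3, 8, 8)          # batch of 2, 3 classes, 8x8 maps
target = torch.randint(0, 3, (2, 8, 8))   # integer class labels
loss = jaccard_loss(logits, target)
# loss is a scalar in [0, 1]; lower means better predicted/true overlap.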
|
10e113294f67cbe88b61e90af51c6c6659ead805
| 32,048 |
def _get_positional_body(*args, **kwargs):
"""Verify args and kwargs are valid, and then return the positional body, if users passed it in."""
if len(args) > 1:
raise TypeError("There can only be one positional argument, which is the POST body of this request.")
if "options" in kwargs:
raise TypeError("The 'options' parameter is positional only.")
return args[0] if args else None
|
c777296ab9c0e95d0f4d7f88dfd4ae292bfc558f
| 32,049 |
def resize_image_with_padding(im, new_dims, interp_order=1):
"""
    Resize an image array to new_dims, preserving the aspect ratio and zero-padding the remainder.
Parameters
----------
im : (H x W x K) ndarray
new_dims : (height, width) tuple of new dimensions.
interp_order : interpolation order, default is linear.
Returns
-------
im : resized ndarray with shape (new_dims[0], new_dims[1], K)
"""
ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]),
dtype=np.float32)
ret.fill(0)
target_as = new_dims[1] / float(new_dims[0])
aspect_ratio = im.shape[1] / float(im.shape[0])
if target_as < aspect_ratio:
scale = new_dims[1] / float(im.shape[1])
scaled_width = int(new_dims[1])
scaled_height = min(int(new_dims[0]), int(scale* im.shape[0]))
resized_img = cv2.resize(im, (scaled_width, scaled_height))
start_x = 0
start_y = 0
ret[start_x: start_x + scaled_height, start_y: start_y + scaled_width, :] = resized_img
else:
scale = new_dims[0] / float(im.shape[0])
scaled_width = min(int(new_dims[1]), int(scale* im.shape[1]))
scaled_height = int(new_dims[0])
resized_img = cv2.resize(im, (scaled_width, scaled_height))
start_x = 0
start_y = int((new_dims[1] - scaled_width) / 2)
ret[start_x: start_x + scaled_height, start_y: start_y + scaled_width, :] = resized_img
return ret.astype(np.float32)
|
d39195cdd20db2a7cd6750c7a81b419a62f820f9
| 32,050 |
from typing import List
from typing import Dict
def _get_run_stats(calc_docs: List[Calculation]) -> Dict[str, RunStatistics]:
"""Get summary of runtime statistics for each calculation in this task."""
run_stats = {}
total = dict(
average_memory=0.0,
max_memory=0.0,
elapsed_time=0.0,
system_time=0.0,
user_time=0.0,
total_time=0.0,
cores=0,
)
for calc_doc in calc_docs:
stats = calc_doc.output.run_stats
run_stats[calc_doc.task_name] = stats
total["average_memory"] = max(total["average_memory"], stats.average_memory)
total["max_memory"] = max(total["max_memory"], stats.max_memory)
total["cores"] = max(total["cores"], stats.cores)
total["elapsed_time"] += stats.elapsed_time
total["system_time"] += stats.system_time
total["user_time"] += stats.user_time
total["total_time"] += stats.total_time
run_stats["overall"] = RunStatistics(**total)
return run_stats
|
fb83c559ced3ca44eaee767d6dabf8c183779f7f
| 32,051 |
def mvn_log_pdf(x, mean, covariance):
"""
This function calculates the log-likelihood of x for a multivariate normal distribution parameterised by the
provided mean and covariance.
:param x: The location(s) to evaluate the log-likelihood. Must be [B x D], where B is the batch size and D is the
dimensionality of the multivariate normal. B can be 1.
:param mean: The mean of the multivariate normal distribution. Must be [1 x D].
:param covariance: The covariance of the multivariate normal distribution. Must be [D x D].
:return: The log-likelihood values evaluated at x. This is a B-length vector.
"""
# Determine number of dimensions of the multivariate normal distribution.
num_dims = tf.shape(covariance, out_type=TF_DTYPE)[-1]
# num_dims = covariance.get_shape().as_list()[-1]
# Calculate log-likelihood.
diff = tf.transpose(x - mean) # [D x B].
chol_covar = tf.cholesky(tf.squeeze(covariance)) # [D x D].
alpha = tf.transpose(tf.matrix_triangular_solve(chol_covar, diff, lower=True)) # [B x D].
beta = tf.reduce_sum(tf.log(tf.diag_part(chol_covar)))
return -0.5 * (tf.reduce_sum(tf.square(alpha), axis=-1) + num_dims * np.log(2.0 * np.pi)) - beta
|
dc2b019ace6760a040d97045b50b552e937706fd
| 32,053 |
def question_input (user_decision=None):
"""Obtains input from user on whether they want to scan barcodes or not.
Parameters
----------
user_decision: default is None, if passed in, will not ask user for input. string type.
Returns
-------
True if user input was 'yes'
False is user input was anything else
"""
# Ask user if they would like to scan a barcode, and obtain their input
    if user_decision is None:
decision = input("Would you like to scan a barcode? Type 'yes' to begin. ")
else:
decision = user_decision
# Return boolean value based on user response
if decision == 'yes':
return True
else:
return False
|
afb7f3d4eef0795ad8c4ff7878e1469e07ec1875
| 32,054 |
def depth(d):
"""Check dictionary depth"""
if isinstance(d, dict):
return 1 + (max(map(depth, d.values())) if d else 0)
return 0
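# A few quick sanity checks of the recursion:
assert depth("not a dict") == 0
assert depth({}) == 1
assert depth({"a": 1}) == 1
assert depth({"a": {"b": {"c": 1}}}) == 3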
|
6fd72b255a5fba193612cfa249bf4d242b315be1
| 32,055 |
import logging
def validate_analysis_possible(f):
"""
Decorator that validates that the amount of information is
sufficient for attractor analysis.
:param f: function
:return: decorated function
"""
def f_decorated(*args, **kwargs):
db_conn, *_ = args
if db_conn.root.n_aggregated_attractors() == 1 or \
db_conn.root.total_frequency() <= 2:
logging.getLogger().info('Not enough attractors to infer node correlations.')
return None
else:
return f(*args, **kwargs)
return f_decorated
|
9de0cbf2e18e47d14912ae3ebdff526a73f2c25d
| 32,056 |
def get_rel(href, method, rule):
"""Returns the `rel` of an endpoint (see `Returns` below).
If the rule is a common rule as specified in the utils.py file, then that rel is
returned.
If the current url is the same as the href for the current route, `self` is
returned.
Args:
href (str): the full endpoint url (e.g. https://alegna-api.nerevu.com/v1/data)
method (str): an HTTP method (e.g. 'GET' or 'DELETE')
rule (str): the endpoint path (e.g. '/v1/data/<int:id>')
Returns:
rel (str): a string representing what the endpoint does
Examples:
>>> href = 'https://alegna-api.nerevu.com/v1/data'
>>> method = 'GET'
>>> rule = '/v1/data'
>>> get_rel(href, method, rule)
'data'
>>> method = 'DELETE'
>>> get_rel(href, method, rule)
'data_delete'
>>> method = 'GET'
>>> href = 'https://alegna-api.nerevu.com/v1'
    >>> rule = '/v1'
>>> get_rel(href, method, rule)
'home'
"""
if href == request.url and method == request.method:
rel = "self"
else:
# check if route is a common route
resourceName = get_resource_name(rule)
rel = get_common_rel(resourceName, method)
# add the method if not common or GET
if not rel:
rel = resourceName
if method != "GET":
rel = f"{rel}_{method.lower()}"
# get params and add to rel
params = get_params(rule)
joined_params = "_".join(params)
if joined_params:
rel = f"{rel}_{joined_params}"
return rel
|
e1e5af2baabec766f07460275d7525569439b40c
| 32,059 |
def is_exist(self, connectivity):
"""Check the existence of a cell defined by a connectivity (vector of points indices).
The order of points indices does not matter.
Parameters
----------
self : CellMat
an CellMat object
connectivity : ndarray
an array of node tags
Returns
-------
bool
True if the element already exist
"""
# Check the existence of the element
e = np.array([], dtype=int)
for nd_tag in connectivity:
e = np.concatenate((e, self.get_point2cell(nd_tag)))
unique, unique_counts = np.unique(e, return_counts=True)
for ie in range(len(unique)):
if unique_counts[ie] == self.nb_pt_per_cell and unique_counts[ie] == len(
connectivity
):
# If this condition is valid, the element already exist
return True
return False
|
59f111040ba158fa03e82400c1d3cb9dc1444601
| 32,060 |
from typing import Optional
from typing import cast
def get_current_identity_arn(boto3_session: Optional[boto3.Session] = None) -> str:
"""Get current user/role ARN.
Parameters
----------
boto3_session : boto3.Session(), optional
Boto3 Session. The default boto3 session will be used if boto3_session receive None.
Returns
-------
str
User/role ARN.
Examples
--------
>>> import awswrangler as wr
>>> arn = wr.sts.get_current_identity_arn()
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
return cast(str, _utils.client(service_name="sts", session=session).get_caller_identity().get("Arn"))
|
892ffdd35d8a31849f4a53b0048a5bf87be624ce
| 32,061 |
def remove_whitespace(sentences):
"""
Clear out spaces and newlines
from the list of list of strings.
Arguments:
----------
sentences : list<list<str>>
Returns:
--------
list<list<str>> : same strings as input,
without spaces or newlines.
"""
return [[w.rstrip() for w in sent] for sent in sentences]
|
ed50124aec20feba037ea775490ede14457d6943
| 32,063 |
def generate_jwt(payload, expiry, secret=None):
"""
    Generate a JWT.
    :param payload: dict, the token payload
    :param expiry: datetime, expiration time
    :param secret: signing secret
    :return: the encoded JWT
"""
_payload = {'exp': expiry}
_payload.update(payload)
if not secret:
secret = current_app.config['JWT_SECRET']
token = jwt.encode(_payload, secret, algorithm='HS256')
return token
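# Usage sketch; assumes PyJWT is installed (imported as `jwt`) and that a
# secret is passed explicitly, so no Flask app context is needed.
import jwt
from datetime import datetime, timedelta

token = generate_jwt({"user_id": 42},
                     expiry=datetime.utcnow() + timedelta(hours=2),
                     secret="change-me")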
|
aa4727b7d26a7f00b015cbaed9a977c5865eedca
| 32,064 |
import json
import secrets
def authentication(uuid):
    """Allow a client to request/receive an authentication key."""
if request.method == "POST":
with peewee_db.atomic():
if RestClient.get_or_none(RestClient.uuid == uuid):
                return json.jsonify({"msg": "UUID already exists."}), 409
else:
authk = secrets.token_urlsafe()
x = RestClient(uuid=uuid, authkey=authk)
x.save()
return json.jsonify({"msg": "RestClient saved.",
"url": "/authentication/" + uuid}), 201
elif request.method == "GET":
with peewee_db.atomic():
query = RestClient.select().where(RestClient.uuid == uuid)
if len(query) > 0:
return json.jsonify({"authkey": query[0].authkey})
else:
return json.jsonify({"msg": "UUID not registered."}), 404
|
ec984b03c17917b12eb7ed6034c28b40f42aac63
| 32,065 |
def one_sided_ema(xolds, yolds, low=None, high=None, n=512, decay_steps=1.,
low_counts_threshold=1e-8):
"""From openai.baselines.common.plot_util.py
perform one-sided (causal) EMA (exponential moving average)
smoothing and resampling to an even grid with n points.
Does not do extrapolation, so we assume
xolds[0] <= low && high <= xolds[-1]
Arguments
---------
xolds : array or list
x values of data. Needs to be sorted in ascending order
yolds : array of list
y values of data. Has to have the same length as xolds
low : float
min value of the new x grid. By default equals to xolds[0]
high : float
max value of the new x grid. By default equals to xolds[-1]
n : int
number of points in new x grid
decay_steps : float
EMA decay factor, expressed in new x grid steps.
low_counts_threshold: float or int
y values with counts less than this value will be set to NaN
Returns
-------
xs : array
with new x grid
ys : array
of EMA of y at each point of the new x grid
count_ys : array
of EMA of y counts at each point of the new x grid
"""
low = xolds[0] if low is None else low
high = xolds[-1] if high is None else high
assert xolds[0] <= low, \
f'low={low} < xolds[0]={xolds[0]} - extrapolation not permitted!'
assert xolds[-1] >= high, \
f'high={high} > xolds[-1]={xolds[-1]} - extrapolation not permitted!'
assert len(xolds) == len(yolds), \
f'len of xolds ({len(xolds)}) and yolds ({len(yolds)}) do not match!'
xolds = xolds.astype('float64')
yolds = yolds.astype('float64')
luoi = 0 # last unused old index
sum_y = 0.
count_y = 0.
xnews = np.linspace(low, high, n)
decay_period = (high - low) / (n - 1) * decay_steps
interstep_decay = np.exp(- 1. / decay_steps)
sum_ys = np.zeros_like(xnews)
count_ys = np.zeros_like(xnews)
for i in range(n):
xnew = xnews[i]
sum_y *= interstep_decay
count_y *= interstep_decay
while True:
if luoi >= len(xolds):
break
xold = xolds[luoi]
if xold <= xnew:
decay = np.exp(- (xnew - xold) / decay_period)
sum_y += decay * yolds[luoi]
count_y += decay
luoi += 1
else:
break
sum_ys[i] = sum_y
count_ys[i] = count_y
ys = sum_ys / count_ys
ys[count_ys < low_counts_threshold] = np.nan
return xnews, ys, count_ys
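# Usage sketch on synthetic data; assumes numpy is imported as np, as the
# function body requires.
import numpy as np

x = np.linspace(0.0, 10.0, 200)
y = np.sin(x) + 0.1 * np.random.randn(200)
xs, ys, counts = one_sided_ema(x, y, n=64, decay_steps=4.0)
# xs is an even 64-point grid over [0, 10]; ys is the causal EMA of y on that grid.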
|
17f14cd7a775c347366f375dfecef6285dc55af7
| 32,066 |
def settings_value(setting_name):
"""Return value for a given setting variable.
{% settings_value "LANGUAGE_CODE" %}
"""
return getattr(settings, setting_name, "")
|
aab0b2f16f0fa66a1c4066382b0b96c6c2a15215
| 32,067 |
def mongo_stat(server, args_array, **kwargs):
"""Method: mongo_stat
Description: Function stub holder for mongo_perf.mongo_stat.
Arguments:
(input) server
(input) args_array
(input) **kwargs
class_cfg
"""
status = True
if server and args_array and kwargs.get("class_cfg", True):
status = True
return status
|
45ae8fd66a1d0cae976959644837fae585d68e65
| 32,068 |
import pickle
def train_new_TFIDF(docs, save_as=None):
"""
Trains a new TFIDF model.\n
If a user abstract is given, it is used for the training.
Parameters
----------
docs : `[String]`. Documents to train on\n
save_as : `String`. Name to save model as.
Returns
-------
    `TfidfVectorizer` : The newly trained TFIDF model
"""
print("Started training TFIDF")
objectives = prepare_documents_for_tfidf(docs)
# creating tfidf model with given parameters (not trained yet)
if len(docs) == 1:
tfidf = init_tfidf_model(max_df=1.0)
else:
tfidf = init_tfidf_model()
# Fit the TfIdf model. Learn vocab and IDF
tfidf.fit(objectives)
print("Finished training TFIDF")
if (save_as):
pickle.dump(tfidf, open(
"custom_logic/src/models/" + save_as + ".sav", 'wb')
)
return tfidf
|
be520b62fa6f718eeb185e59fed5fe3edf8d7ea7
| 32,069 |
import math
def generate_sphere_points(n):
"""
Returns list of coordinates on a sphere using the Golden-
Section Spiral algorithm.
"""
points = []
inc = math.pi * (3 - math.sqrt(5))
offset = 2 / float(n)
for k in range(int(n)):
y = k * offset - 1 + (offset / 2)
r = math.sqrt(1 - y*y)
phi = k * inc
points.append(v3.vector(math.cos(phi)*r, y, math.sin(phi)*r))
return points
|
6349f001709c2d2958cc2bcdd9bfe9c70a79b8f9
| 32,070 |
def get_options():
"""
Purpose:
Parse CLI arguments for script
Args:
N/A
Return:
N/A
"""
parser = ArgumentParser(description="Produce to Kafka Topic")
required = parser.add_argument_group("Required Arguments")
optional = parser.add_argument_group("Optional Arguments")
# Optional Arguments
optional.add_argument(
"-R", "--replication", "--topic-replication",
dest="topic_replication",
help="Replication factor of the topic to create",
required=False,
default=1,
type=int,
)
optional.add_argument(
"-P", "--partitions", "--topic-partitions",
dest="topic_partitions",
help="Number of partitions of the topic to create",
required=False,
default=1,
type=int,
)
# Required Arguments
required.add_argument(
"-B", "--broker", "--brokers", "--kafka-broker", "--kafka-brokers",
action="append",
dest="kafka_brokers",
help="Kafka Brokers",
required=True,
type=str,
)
required.add_argument(
"-T", "--topic", "--kafka-topic", "--topic-name",
dest="topic_name",
help="Topic name to create",
required=True,
type=str,
)
return parser.parse_args()
|
a6168fb549fff9b2634f4c55cc625b7e3e387fa7
| 32,071 |
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
"""Estimate the log Gaussian probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
means : array-like of shape (n_components, n_features)
precisions_chol : array-like
Cholesky decompositions of the precision matrices.
'full' : shape of (n_components, n_features, n_features)
'tied' : shape of (n_features, n_features)
'diag' : shape of (n_components, n_features)
'spherical' : shape of (n_components,)
covariance_type : {'full', 'tied', 'diag', 'spherical'}
Returns
-------
log_prob : array, shape (n_samples, n_components)
"""
n_samples, n_features = X.shape
n_components, _ = means.shape
# det(precision_chol) is half of det(precision)
log_det = _compute_log_det_cholesky(
precisions_chol, covariance_type, n_features)
if covariance_type == 'full':
log_prob = np.empty((n_samples, n_components))
#print('mm', n_samples, n_features, n_components)
for i, x in enumerate(X):
#print('x', i, x)
for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):
pp = 0.0
for f in range(x.shape[0]):
dot_m = 0.0
dot_x = 0.0
for p in range(prec_chol.shape[0]):
dot_m += (mu[p] * prec_chol[p,f])
dot_x += (x[p] * prec_chol[p,f])
y = (dot_x - dot_m)
pp += ( y * y )
#print('k', k, '\n', mu, '\n', prec_chol)
dot_x = np.dot(x, prec_chol)
dot_m = np.dot(mu, prec_chol)
y = dot_x - dot_m
#print('dot_x', dot_x)
#print('dot_m', dot_m)
#print('y', y)
p = np.sum(np.square(y), axis=0) # sum over features
#assert p == pp, (p, pp)
#print("log_prob", i, k, p)
log_prob[i, k] = p
elif covariance_type == 'tied':
log_prob = np.empty((n_samples, n_components))
for k, mu in enumerate(means):
y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol)
log_prob[:, k] = np.sum(np.square(y), axis=1)
elif covariance_type == 'diag':
precisions = precisions_chol ** 2
log_prob = (np.sum((means ** 2 * precisions), 1) -
2. * np.dot(X, (means * precisions).T) +
np.dot(X ** 2, precisions.T))
elif covariance_type == 'spherical':
precisions = precisions_chol ** 2
log_prob = (np.sum(means ** 2, 1) * precisions -
2 * np.dot(X, means.T * precisions) +
np.outer(row_norms(X, squared=True), precisions))
s = -.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
#print('s', s, 'log_det\n', log_det)
return s
|
c8d50ca609dfd877463ae20c572ec1ab60960b21
| 32,072 |
def descendants(region_id, allowed_ids, rm: RegionMeta):
"""Get all filtered descendant IDs of a given region ID.
A descendant is only accepted if it's in ``allowed_ids`` or is a
leaf region.
    This mimics Dimitri's algorithm; I'm not sure why it must
    be done this way.
"""
all_descendants = set()
for child_id in rm.children(region_id):
if child_id in allowed_ids or rm.is_leaf(child_id):
all_descendants.add(child_id)
all_descendants |= descendants(child_id, allowed_ids, rm)
return all_descendants
|
b0d1a5b57c00335343e52fbfe6e73b47bbccda42
| 32,073 |
import json
def search_classification(request):
"""
Filters the classification by name.
"""
filters = json.loads(request.GET.get('filters', {}))
fields = filters.get('fields', [])
page = int_arg(request.GET.get('page', 1))
classifications = Classification.objects.get_queryset()
if 'name' in fields:
method = filters.get('method', 'ieq')
if method == 'ieq':
classifications = classifications.filter(name__iexact=filters['name'])
elif method == 'icontains':
classifications = classifications.filter(name__icontains=filters['name'])
classifications = classifications.annotate(Count('ranks'))
classifications_list = []
if classifications:
for classification in classifications:
classifications_list.append({
"id": classification.id,
"name": classification.name,
'can_modify': classification.can_modify,
'can_delete': classification.can_delete,
'label': classification.get_label(),
'description': classification.description,
'num_classification_ranks': classification.ranks__count
})
response = {
'items': classifications_list,
'page': page
}
return HttpResponseRest(request, response)
|
fd1b78a9169c3496ee0b5d286d03d81cf75473c9
| 32,074 |
import click
def get_zone_id(ctx, param, zone_name):
"""Return the id for a zone by name."""
del ctx #unused
del param #unused
cf = CloudFlare.CloudFlare()
zones = cf.zones.get(params={'name': zone_name})
if len(zones) != 1:
raise click.ClickException('Invalid zone name: {}'.format(zone_name))
return (zones[0]['id'], zones[0]['name'])
|
07ebf939fe08b9f146ddb871af97e59c88e9484d
| 32,075 |
from collections import deque
from typing import List
def solve(letters: List[str], dictionary: trie.Node) -> List[str]:
"""Finds all words that can be made using the given letters.
"""
center_letter = letters[0]
words = set()
queue = deque([letter for letter in letters])
while queue:
candidate = queue.popleft()
is_word, is_prefix = trie.has_word_has_prefix(dictionary, candidate)
if is_word and center_letter in candidate:
words.add(candidate)
if is_prefix:
queue.extend([candidate + letter for letter in letters])
return list(words)
|
8a1773c6c388b88cc4b208ca24d5c0b8249722d0
| 32,076 |
import datetime
def datetime_from_milliseconds_since_epoch(ms_since_epoch: int, timezone: datetime.timezone = None) -> datetime.datetime:
"""Converts milliseconds since epoch to a datetime object.
Arguments:
----------
ms_since_epoch {int} -- Number of milliseconds since epoch.
Keyword Arguments:
--------
timezone {datetime.timezone} -- The timezone of the new datetime object. (default: {None})
Returns:
--------
datetime.datetime -- A python datetime object.
"""
return datetime.datetime.fromtimestamp((ms_since_epoch / 1000), tz=timezone)
|
95528da79c78ca9956d656067b5be623058b12e6
| 32,077 |
def input_file(path):
"""
Read common text file as a stream of (k, v) pairs where k is line number
and v is line text
:param path: path to the file to read
:return: lazy seq of pairs
"""
return zip(count(), __input_file(path))
|
d1699863f790181bdbd5ea1abc08446e32909ffc
| 32,078 |
def lik_constant(vec, rho, t, root=1, survival=1, p1=p1):
"""
Calculates the likelihood of a constant-rate birth-death process, conditioned
on the waiting times of a phylogenetic tree and degree of incomplete sampling.
Based off of the R function `TreePar::LikConstant` written by Tanja Stadler.
T. Stadler. On incomplete sampling under birth-death models and connections
to the sampling-based coalescent. Jour. Theo. Biol. 261: 58-66, 2009.
Args:
vec (float, float): two element tuple of birth and death
rho (float): sampling fraction
t (list): vector of waiting times
root (bool): include the root or not? (default: 1)
survival (bool): assume survival of the process? (default: 1)
Returns:
float: a likelihood
"""
l = vec[0] # noqa: E741
m = vec[1]
t.sort(reverse=True)
lik = (root + 1) * log(p1(t[0], l, m, rho))
for tt in t[1:]:
lik += log(l) + log(p1(tt, l, m, rho))
if survival == 1:
lik -= (root + 1) * log(1 - p0(t[0], l, m, rho))
return -lik
|
bfb74866eec3c6eedbd6536522403f08340d39d7
| 32,079 |
def window_bounds(
window: Window,
affine: Affine,
offset: str = 'center'
) -> tuple[float, float, float, float]:
"""Create bounds coordinates from a rasterio window
Parameters:
window: Window
affine: Affine
offset: str
Returns:
coordinate bounds (w, s, e, n)
"""
(row_start, col_start), (row_stop, col_stop) = window
w, s = xy(affine, row_stop, col_start, offset=offset)
e, n = xy(affine, row_start, col_stop, offset=offset)
bounds = (w, s, e, n)
return bounds
|
6d6dca039213b4f5ea85d9168172b5cb32ff1a1e
| 32,080 |
import copy
def get_k8s_model(model_type, model_dict):
"""
Returns an instance of type specified model_type from an model instance or
represantative dictionary.
"""
model_dict = copy.deepcopy(model_dict)
if isinstance(model_dict, model_type):
return model_dict
elif isinstance(model_dict, dict):
# convert the dictionaries camelCase keys to snake_case keys
model_dict = _map_dict_keys_to_model_attributes(model_type, model_dict)
# use the dictionary keys to initialize a model of given type
return model_type(**model_dict)
else:
raise AttributeError("Expected object of type 'dict' (or '{}') but got '{}'.".format(model_type.__name__, type(model_dict).__name__))
|
217c517b53acb596eec51773f856ceaf15a93597
| 32,081 |
def merge_specs(specs_):
"""Merge TensorSpecs.
Args:
specs_: List of TensorSpecs to be merged.
Returns:
a TensorSpec: a merged TensorSpec.
"""
shape = specs_[0].shape
dtype = specs_[0].dtype
name = specs_[0].name
for spec in specs_[1:]:
assert shape[1:] == spec.shape[1:], "incompatible shapes: %s, %s" % (
shape, spec.shape)
assert dtype == spec.dtype, "incompatible dtypes: %s, %s" % (
dtype, spec.dtype)
shape = merge_shapes((shape, spec.shape), axis=0)
return specs.TensorSpec(
shape=shape,
dtype=dtype,
name=name,
)
|
fb0c895847c477cc90eb3b495505fe436667fe1e
| 32,082 |
def makeMapItem(pl_id):
    """ Retrieve a player's items on the map (stand or pub). Uses the makeMapItemStand and makeMapItemPub functions.
    :param arg1: player id
    :type arg1: int
    :return: collection of the objects belonging to the player, with their positions
    :rtype: collection of JSON objects
"""
mapItem = []
mapItem.append(makeMapItemStand(pl_id))
pub_id = db.select("SELECT p.p_id FROM Pub p WHERE p.pl_id = "+ str(pl_id))
if len(pub_id) != 0:
for row in pub_id:
mapItem.append(makeMapItemPub(row['pub_id']))
return (mapItem)
|
5bb5745dc161d74b2bd832a213c55906bc5f358c
| 32,083 |
def get_test_result_records(page_number, per_page, filters):
"""Get page with applied filters for uploaded test records.
:param page_number: The number of page.
:param per_page: The number of results for one page.
:param filters: (Dict) Filters that will be applied for records.
"""
return IMPL.get_test_result_records(page_number, per_page, filters)
|
89fc8b6d441ec8830cdb16fccfa142eed39f6c6a
| 32,084 |
def new_graph(**kwargs) -> Plot:
    """Create a new graph plot from the given keyword arguments.
    :return: the constructed graph plot
:rtype: Plot
"""
return GraphPlot(kwargs)
|
0bdbe97f6d86ba7dcfe802b5d41cb2e61fbb0ea7
| 32,085 |
def _average_path_length(n_samples_leaf):
"""
Taken from sklearn implementation of isolation forest:
https://github.com/scikit-learn/scikit-learn/blob/fd237278e/sklearn/ensemble/_iforest.py#L480
For each given number of samples in the array n_samples_leaf, this calculates average path length of unsucceesful
BST search.
Args:
n_samples_leaf: array of number of samples (in leaf)
Returns:
array of average path lengths
"""
n_samples_leaf_shape = n_samples_leaf.shape
n_samples_leaf = n_samples_leaf.reshape((1, -1))
average_path_length = np.zeros(n_samples_leaf.shape)
mask_1 = n_samples_leaf <= 1
mask_2 = n_samples_leaf == 2
not_mask = ~np.logical_or(mask_1, mask_2)
average_path_length[mask_1] = 0.0
average_path_length[mask_2] = 1.0
average_path_length[not_mask] = (
2.0 * (np.log(n_samples_leaf[not_mask] - 1.0) + np.euler_gamma)
- 2.0 * (n_samples_leaf[not_mask] - 1.0) / n_samples_leaf[not_mask]
)
return average_path_length.reshape(n_samples_leaf_shape)
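# Quick numerical check; assumes numpy is imported as np, as the body requires.
import numpy as np

apl = _average_path_length(np.array([1, 2, 50]))
# apl[0] == 0.0, apl[1] == 1.0, and apl[2] ~= 2*(ln(49) + euler_gamma) - 2*49/50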
|
d6434c4ed437e0f8bff9e5d9f25bfdc84ce8f82d
| 32,086 |
import pkg_resources
def get_pkg_license(pkgname):
"""
Given a package reference (as from requirements.txt),
return license listed in package metadata.
NOTE: This function does no error checking and is for
demonstration purposes only.
"""
pkgs = pkg_resources.require(pkgname)
pkg = pkgs[0]
for line in pkg.get_metadata_lines('PKG-INFO'):
(k, v) = line.split(': ', 1)
if k == "License":
return v
return None
|
238f2b3d33de6bf8ebfcca8f61609a58357e6da1
| 32,087 |
def parse(date):
"""
convert date from different input formats:
Parameters
----------
date : STR
FLOAT (unix timestamp)
Python native datetime.date object
pandas datetime object
Returns
-------
datetime.date
"""
out = False
try:
if isinstance(date, str):
date = pd.to_datetime(date).date()
out = True
if isinstance(date, float) or isinstance(date, int):
date = pd.to_datetime(date, unit='s').date()
out = True
if not out:
date = pd.to_datetime(date).date()
    except Exception:
date = None
return date
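# Usage sketch; assumes pandas is imported as pd, as the body requires.
import pandas as pd

parse("2021-06-01")   # -> datetime.date(2021, 6, 1)
parse(1622505600)     # unix seconds -> datetime.date(2021, 6, 1)
parse("not a date")   # -> None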
|
e7de2f8198c177630dfd75980b25fe5e9a70e0a2
| 32,088 |
def check_table_exist(conn):
"""Check if a table exists.
We do not use IF EXISTS in creating the table so as to we will not create
hyper table twice when the table already exists.
Args:
conn (psycopg2.extensions.connection): The connection to PostgreSQL database.
Returns:
        True if the table exists. False if the table does not exist.
"""
CHECK_SQL = """
SELECT EXISTS (
SELECT FROM pg_tables
WHERE tablename = '%s'
);
""" % TABLENAME
cur = conn.cursor()
cur.execute(CHECK_SQL)
result = cur.fetchall()[0][0]
cur.close()
return result
|
0d9199464c0323f5258e4ca6638b5a5c6759b434
| 32,089 |
import traceback
def get_err_str(exception, message, trace=True):
"""Return an error string containing a message and exception details.
Args:
exception (obj): the exception object caught.
message (str): the base error message.
trace (bool): whether the traceback is included (default=True).
"""
if trace:
trace_str = "".join(traceback.format_tb(exception.__traceback__)).strip()
err_str = "{}\nTYPE: {}\nDETAILS: {}\nTRACEBACK:\n\n{}\n" \
"".format(message, type(exception), exception, trace_str)
else:
err_str = "{}\nTYPE: {}\nDETAILS: {}\n".format(message, type(exception), exception)
return err_str
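# Usage sketch:
try:
    1 / 0
except ZeroDivisionError as exc:
    msg = get_err_str(exc, "Division failed.")
    # msg contains the base message, the exception type/details and the traceback.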
|
0ede3de80fb1097b0537f90337cf11ffa1edecf7
| 32,090 |
def epoch_data(data, window_length = 2,overlap=0.5):
"""
Separates the data into equal sized windows
Input:
- data: data to seperate into windows
- window_length: length of the window in seconds
- overlap: overlap, float in [0,1), in percentage overlap of windows
Output:
        an array of windows, each containing window_length seconds of data
"""
sample_rate = 250 # Hz
array_epochs = []
i = 0
window_size_hz = int(window_length * sample_rate)
overlap_size_hz = int(overlap * window_length * sample_rate)
while(i <= len(data)-window_size_hz ):
array_epochs.append(data[i:i+ window_size_hz ])
i = i + window_size_hz - overlap_size_hz # This is what the raw data looks like
# if i is not len(data) - 1:
# array_epochs.append(data[i:len(data)])
return np.array(array_epochs)
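# Quick shape check; assumes numpy is imported as np, as the body requires.
# 8 seconds of fake 250 Hz data, 2 s windows, 50% overlap -> 7 windows of 500 samples.
import numpy as np

fake = np.random.randn(8 * 250)
windows = epoch_data(fake, window_length=2, overlap=0.5)
# windows.shape == (7, 500)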
|
ab15ea4927118ed36ccd9d161772de7457239374
| 32,091 |
def dailyUsagePer15minn(index_list, tank_data_list):
    """Process the tank temperature series into a water-usage series split into 96 fifteen-minute
    intervals, where each interval holds the volume (in liters) of normalized hot water (37 °C) used.
:param index_list: list of indexes
:param tank_data_list: list of tank temperatures
:return: list of numbers, length = 96
"""
first_use = tank_data_list[index_list[1]]
hh_mm = first_use['time'].split('T')[1]
zero_intervals = int(hh_mm[0:2]) * 4 # from midnight till first use hours
zero_intervals += int(hh_mm[3:5]) // 15 # floor division for minutes
usage = [0] * zero_intervals # fill intervals till first use of day
for i in range(0, len(index_list) - 1, 2):
tank_before = float(tank_data_list[index_list[i]]['value'])
tank_after = float(tank_data_list[index_list[i + 1]]['value'])
water_before, water_after = wrapTempToWaterTemp(temp_before=tank_before, temp_after=tank_after)
used = calculateUsedWater(temp_tank_before=water_before, temp_tank_after=water_after)
# add new interval with usage or extend last, depending on time
if i > 0:
t = tank_data_list[index_list[i]]['time'].split('T')[1]
past = tank_data_list[index_list[i - 2]]['time'].split('T')[1]
if (int(t[0:2]) - int(past[0:2])) == 0 and ((int(t[3:5]) // 15) == (int(past[3:5]) // 15)):
usage[-1] = float(usage[-1]) + round(abs(normalizeUsedWater(
temp_tank_before=water_before, temp_tank_after=water_after, used_volume=used)), 2)
else:
usage.append(round(abs(normalizeUsedWater(temp_tank_before=water_before,
temp_tank_after=water_after, used_volume=used)), 2))
else:
usage.append(round(abs(normalizeUsedWater(temp_tank_before=water_before,
temp_tank_after=water_after, used_volume=used)), 2))
# fill gaps between intervals with usage by intervals with 0
if i + 2 <= len(index_list) - 1:
last_time = tank_data_list[index_list[i]]['time'].split('T')[1]
next_time = tank_data_list[index_list[i + 2]]['time'].split('T')[1]
hour_difference = (int(next_time[0:2]) - int(last_time[0:2]))
if hour_difference == 0:
gap = ((int(next_time[3:5]) // 15) - (int(last_time[3:5]) // 15)) - 1
if gap > 0:
usage.extend([0] * gap)
elif hour_difference == 1:
gap = (4 - ((int(last_time[3:5]) // 15) + 1)) + (int(next_time[3:5]) // 15)
if gap > 0:
usage.extend([0] * gap)
else:
gap = (4 - ((int(last_time[3:5]) // 15) + 1)) + (int(next_time[3:5]) // 15)
gap += (hour_difference - 1) * 4
if gap > 0:
usage.extend([0] * gap)
# fill intervals from last use of day till midnight
last_use = tank_data_list[index_list[-2]]
hh_mm = last_use['time'].split('T')[1]
gap = ((24 - int(hh_mm[0:2])) - 1) * 4
gap += 4 - ((int(hh_mm[3:5]) // 15) + 1)
if gap > 0:
usage.extend([0] * gap)
return usage
|
7699b4455376675312b2800896db4f75b37a1ada
| 32,092 |
def is_valid(number):
"""Check if the number provided is a valid CAS RN."""
try:
return bool(validate(number))
except ValidationError:
return False
|
7e05c8e05f779c6f06150d90ab34b18585dd4803
| 32,093 |
import torch
def get_kernel(kernel_type, input_dim, on_gpu=True, **kwargs):
"""
Initializes one of the following gpytorch kernels: RBF, Matern
Args:
kernel_type (str):
            Kernel type ('RBF', 'Matern52', 'Spectral')
input_dim (int):
Number of input dimensions
(translates into number of kernel dimensions unless isotropic=True)
on_gpu (bool):
Sets default tensor type to torch.cuda.DoubleTensor
**lengthscale (list of two lists):
Determines lower (1st list) and upper (2nd list) bounds
for kernel lengthscale(s);
number of elements in each list is equal to the input dimensions
**isotropic (bool):
one kernel lengthscale in all dimensions
**n_mixtures (int):
number of mixtures for spectral mixture kernel
**precision (str):
Choose between single ('single') and double ('double') precision
Returns:
kernel object
"""
precision = kwargs.get("precision", "double")
if precision == 'single':
tensor_type = torch.FloatTensor
tensor_type_gpu = torch.cuda.FloatTensor
else:
tensor_type = torch.DoubleTensor
tensor_type_gpu = torch.cuda.DoubleTensor
if on_gpu and torch.cuda.is_available():
torch.set_default_tensor_type(tensor_type_gpu)
else:
torch.set_default_tensor_type(tensor_type)
lscale = kwargs.get('lengthscale')
isotropic = kwargs.get("isotropic")
nmix = kwargs.get("n_mixtures")
if nmix is None:
nmix = 4
if lscale is not None:
lscale = gpytorch.constraints.Interval(torch.tensor(lscale[0]),
torch.tensor(lscale[1]))
input_dim = 1 if isotropic else input_dim
kernel_book = lambda input_dim, lscale, **kwargs: {
'RBF': gpytorch.kernels.RBFKernel(
ard_num_dims=input_dim,
lengthscale_constraint=lscale
),
'Matern52': gpytorch.kernels.MaternKernel(
ard_num_dims=input_dim,
lengthscale_constraint=lscale
),
'Spectral': gpytorch.kernels.SpectralMixtureKernel(
ard_num_dims=input_dim,
num_mixtures=kwargs.get("nmix")
)
}
try:
kernel = kernel_book(input_dim, lscale, nmix=nmix)[kernel_type]
except KeyError:
print('Select one of the currently available kernels:',\
'"RBF", "Matern52", "Spectral"')
raise
return kernel
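# A minimal usage sketch, assuming gpytorch is installed; the bounds and dimensionality
# below are arbitrary example values, not from the original code.
kernel = get_kernel(
    'Matern52', input_dim=2, on_gpu=False,
    lengthscale=[[0.01, 0.01], [4.0, 4.0]],
)
print(type(kernel).__name__)  # MaternKernel with ARD over 2 dimensions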
|
f9660ba9ef16a816a2377ac90531e6d2705a0659
| 32,094 |
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
685ff34e14c26fcc408e5d4f9219483118bfd3c0
| 32,095 |
def down_sample(source, freq_vocab, replacement='', threshold=1e-3, min_freq=0, seed=None, name=None):
"""Randomly down-sample high frequency tokens in `source` with `replacement` value.
Args:
source: string `Tensor` or `RaggedTensor` or `SparseTensor` of any shape, items to be sampled.
freq_vocab: `Counter` with frequencies vocabulary.
replacement: `string`, value to set instead of downsampled ones
threshold: `float`, items occurrence threshold.
min_freq: `int`, items below that frequency will be treated as unique.
seed: `int`, used to create a random seed (optional).
See @{tf.random.set_seed} for behavior.
name: `string`, a name for the operation (optional).
Returns:
A boolean `Tensor` of same shape as source: "keep" flags.
"""
with tf.name_scope(name or 'down_sample'):
if isinstance(source, sparse_tensor.SparseTensorValue) or isinstance(source, sparse_tensor.SparseTensor):
source = sparse_tensor.convert_to_tensor_or_sparse_tensor(source, dtype=tf.string, name=name)
else:
source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, dtype=tf.string, name=name)
if not tf.string.is_compatible_with(source.dtype):
raise RuntimeError('"Source" must have dtype compatible with "string". '
'Actual: {}'.format(source.dtype))
if isinstance(source, tf.SparseTensor):
return tf.SparseTensor(
values=down_sample(source.values, freq_vocab, replacement, threshold, min_freq, seed),
indices=source.indices,
dense_shape=source.dense_shape
)
elif isinstance(source, tf.RaggedTensor):
return source.with_flat_values(
down_sample(source.flat_values, freq_vocab, replacement, threshold, min_freq, seed)
)
keep = sample_mask(
source=source,
freq_vocab=freq_vocab,
threshold=threshold,
min_freq=min_freq,
seed=seed,
)
return tf.where(keep, source, replacement)
|
2209bcc48356f4d11c9151a80ab85069c8b6ad5b
| 32,096 |
import platform
import pickle
def load_pickle(f):
"""使用pickle加载文件"""
version = platform.python_version_tuple() # 取python版本号
if version[0] == '2':
return pickle.load(f) # pickle.load, 反序列化为python的数据类型
elif version[0] == '3':
return pickle.load(f, encoding='latin1')
raise ValueError("invalid python version: {}".format(version))
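# A small round-trip check for the helper above: write a temporary pickle and read it back.
import tempfile, os
payload = {"a": 1, "b": [1, 2, 3]}
path = os.path.join(tempfile.mkdtemp(), "payload.pkl")
with open(path, "wb") as f:
    pickle.dump(payload, f)
with open(path, "rb") as f:
    assert load_pickle(f) == payload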
|
33db0ba6dbd8b1d2b3eba57e63d4069d91fbcb0b
| 32,097 |
def extract_hashtags(text_list):
"""Return a summary dictionary about hashtags in :attr:`text_list`
Get a summary of the number of hashtags, their frequency, the top
ones, and more.
:param list text_list: A list of text strings.
:returns summary: A dictionary with various stats about hashtags
>>> posts = ['i like #blue', 'i like #green and #blue', 'i like all']
>>> hashtag_summary = extract_hashtags(posts)
>>> hashtag_summary.keys()
dict_keys(['hashtags', 'hashtags_flat', 'hashtag_counts', 'hashtag_freq',
'top_hashtags', 'overview'])
>>> hashtag_summary['hashtags']
[['#blue'], ['#green', '#blue'], []]
A simple extract of hashtags from each of the posts. An empty list if
none exist
>>> hashtag_summary['hashtags_flat']
['#blue', '#green', '#blue']
All hashtags in one flat list.
>>> hashtag_summary['hashtag_counts']
[1, 2, 0]
The count of hashtags per post.
>>> hashtag_summary['hashtag_freq']
[(0, 1), (1, 1), (2, 1)]
Shows how many posts had 0, 1, 2, 3, etc. hashtags
(number_of_hashtags, count)
>>> hashtag_summary['top_hashtags']
[('#blue', 2), ('#green', 1)]
>>> hashtag_summary['overview']
{'num_posts': 3,
'num_hashtags': 3,
'hashtags_per_post': 1.0,
'unique_hashtags': 2}
"""
return extract(text_list, HASHTAG, 'hashtag')
|
688824fbef72c961b48a6bdb001983c25b1e0cee
| 32,098 |
def skipif_32bit(param):
"""
Skip parameters in a parametrize on 32bit systems. Specifically used
here to skip leaf_size parameters related to GH 23440.
"""
marks = pytest.mark.skipif(
compat.is_platform_32bit(), reason="GH 23440: int type mismatch on 32bit"
)
return pytest.param(param, marks=marks)
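# A hedged usage sketch (pytest and the helper above assumed to be in scope): wrap only the
# leaf_size values known to misbehave on 32-bit builds inside a parametrize list.
@pytest.mark.parametrize("leaf_size", [skipif_32bit(1), skipif_32bit(10), 100])
def test_leaf_size_accepts_value(leaf_size):
    assert leaf_size > 0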
|
575226d01867ae1f898fc821fe1d51de5eb630fb
| 32,099 |
from typing import Any
def check_int(data: Any) -> int:
"""Check if data is `int` and return it."""
if not isinstance(data, int):
raise TypeError(data)
return data
|
814155f2407cd0e8b580372679f4cecfcc087d9e
| 32,100 |
import numpy as np
def onehot_encoding(categories, max_categories):
"""Given a list of integer categories (out of a set of max_categories)
return one-hot enocded values"""
out_array = np.zeros((len(categories), max_categories))
for key, val in enumerate(categories):
out_array[key, int(val)] = 1.0
return out_array
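# Quick sanity check of the encoder above.
encoded = onehot_encoding([0, 2, 1], max_categories=4)
print(encoded)
# [[1. 0. 0. 0.]
#  [0. 0. 1. 0.]
#  [0. 1. 0. 0.]]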
|
89203b285faed64b4519a2ad5234b77b4fa837aa
| 32,102 |
import cv2
def canny(gl_image, low_threshold=50, high_threshold=150):
"""Applies the Canny transform"""
return cv2.Canny(gl_image, low_threshold, high_threshold)
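# Hedged example: apply the wrapper to a synthetic grayscale image (a bright square on a
# dark background), assuming OpenCV and NumPy are available.
import numpy as np
img = np.zeros((64, 64), dtype=np.uint8)
img[16:48, 16:48] = 255
edges = canny(img)
print(edges.shape, edges.max())  # (64, 64) 255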
|
de2d7194e9df6ab4cc7cda25a0b5ccd37f81822b
| 32,103 |
def res_stage(block:nn.Module, ic:int, oc:int, num_layers:int, dflag:bool=True,
btype:str='basic', fdown:bool=False):
"""
Arguments
---------
block : nn.Module
the block type to be stacked one upon another
ic : int
# of input channels
oc : int
# of output channels
    num_layers : int
# of blocks to be stacked
dflag : bool
Whether the first resblock needs to perform downsampling.
Defaults to True.
    btype : str, should be one of ['basic', 'bottleneck']
The type of resblock to be used. Defaults to 'basic'
fdown : bool
If true the side branch *must* contain a conv block,
whether it performs downsampling or not. Defaults to False.
Returns
-------
layers : list
A list containing all the nn.Module that is required for this layer.
"""
layers = [block(ic, oc, dflag=dflag, btype=btype, fdown=fdown)]
layers += [block(oc, oc, btype=btype) for i in range (num_layers-1)]
return layers
|
6cd95ba6d923265093daca2bdf888bde248dfd12
| 32,104 |
def getFirstValid(opts, default):
"""Returns the first valid entry from `opts`, or `default` if none found.
Valid is defined as ``if o`` returns true."""
for o in opts:
if o: return o
return default
|
799a6ea4a993f0a112fa38b882566d72a0d223e0
| 32,105 |
def grid_density_gaussian_filter(data, size, resolution=None, smoothing_window=None):
"""Smoothing grid values with a Gaussian filter.
:param [(float, float, float)] data: list of 3-dimensional grid coordinates
:param int size: grid size
:param int resolution: desired grid resolution
:param int smoothing_window: size of the gaussian kernels for smoothing
:return: smoothed grid values
:rtype: numpy.ndarray
"""
resolution = resolution if resolution else size
k = (resolution - 1) / size
w = smoothing_window if smoothing_window else int(0.01 * resolution) # Heuristic
imgw = (resolution + 2 * w)
img = np.zeros((imgw, imgw))
for x, y, z in data:
ix = int(x * k) + w
iy = int(y * k) + w
if 0 <= ix < imgw and 0 <= iy < imgw:
img[iy][ix] += z
z = ndi.gaussian_filter(img, (w, w)) # Gaussian convolution
z[z <= BLANK_THRESH] = np.nan # Making low values blank
return z[w:-w, w:-w]
|
d4c833aee72d28a760584cbd995595497d740531
| 32,108 |
def print_train_time(start, end, device=None):
"""Prints difference between start and end time.
Args:
start (float): Start time of computation (preferred in timeit format).
end (float): End time of computation.
device ([type], optional): Device that compute is running on. Defaults to None.
Returns:
float: time between start and end in seconds (higher is longer).
"""
total_time = end - start
print(f"\nTrain time on {device}: {total_time:.3f} seconds")
return total_time
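# Example timing of a trivial workload with the helper above.
from timeit import default_timer as timer
start = timer()
_ = sum(i * i for i in range(100_000))
end = timer()
print_train_time(start, end, device="cpu")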
|
9935f2c12bac8e8beca38075dd6f80b7211318b7
| 32,110 |
def Moving_Average_ADX(data, period=14, smooth=14, limit=18):
"""
Moving Average ADX
ADX Smoothing Trend Color Change on Moving Average and ADX Cross. Use on Hourly Charts - Green UpTrend - Red DownTrend - Black Choppy No Trend
Source: https://www.tradingview.com/script/owwws7dM-Moving-Average-ADX/
Translator: 阿财(Rgveda@github)(4910163#qq.com)
Parameters
----------
    data : (N,) array_like
        The OHLC Kline series.
    period : int or None, optional
        DI length period. Default value is 14.
    smooth : int or None, optional
        ADX smoothing length period. Default value is 14.
    limit : int or None, optional
        ADX MA active limit threshold. Default value is 18.
    Returns
    -------
    adx, ADXm : ndarray
        The ADX indicator and the trend direction sequence; (-1, 0, 1) stand for (negative, no trend, positive).
"""
up = data.high.pct_change()
down = data.low.pct_change() * -1
trur = TA_HMA(talib.TRANGE(data.high.values, data.low.values, data.close.values) , period)
plus = 100 * TA_HMA(np.where(((up > down) & (up > 0)), up, 0), period) / trur
minus = 100 * TA_HMA(np.where(((down > up) & (down > 0)), down, 0), period) / trur
    # This stands in for dropna: dropping entries from the ndarray could put it out of
    # alignment with data.index, so leading values are zero-padded instead.
plus = np.r_[np.zeros(period + 2), plus[(period + 2):]]
minus = np.r_[np.zeros(period + 2), minus[(period + 2):]]
sum = plus + minus
adx = 100 * TA_HMA(abs(plus - minus) / (np.where((sum == 0), 1, sum)), smooth)
adx = np.r_[np.zeros(smooth + 2), adx[(smooth + 2):]]
ADXm = np.where(((adx > limit) & (plus > minus)), 1, np.where(((adx > limit) & (plus < minus)), -1, 0))
return adx, ADXm
|
63595d9cc53999ae1e4b75c971b4388f092da649
| 32,112 |
def forbid_end(interval, function):
""" Forbids an interval variable to end during specified regions.
In the declaration of an interval variable it is only possible to specify a range of possible end times.
This function allows the user to specify more precisely when the interval variable can end.
In particular, the interval variable can end only at point *t* such that the function has non-zero value at
*t-1*.
When the interval variable is absent then this constraint is automatically satisfied,
    since such an interval variable does not have any end at all.
Note the difference between *t* (end time of the interval variable) and *t-1*
(the point when the function value is checked). It simplifies the sharing of the same function
in constraints *forbid_start* and *forbid_end*.
It also allows one to use the same function as *intensity* parameter of interval variable.
Args:
interval: Interval variable being restricted.
function: If the function has value 0 at point *t*-1 then the interval variable interval cannot end at *t*.
Returns:
Constraint expression
"""
return CpoFunctionCall(Oper_forbid_end, Type_Constraint, (_convert_arg(interval, "interval", Type_IntervalVar),
_convert_arg(function, "function", Type_StepFunction)))
|
fee64b27578f78632ed84d2575a7b2dfb6de39e2
| 32,113 |
import re
def get_all_anime(total_pages: int) -> list:
"""
Get all the anime listed on all the pages of the website.
:param total_pages: Total number of pages of HorribleSubs.
:return: List containing the names of all the anime.
"""
titles = []
for page in range(1, total_pages + 1):
print(f"Processing page: {page}/{total_pages}")
url = f"https://nyaa.si/?f=0&c=0_0&q=[HorribleSubs]&p={page}"
soup = open_url(url)
tags = soup('a')
for tag in tags:
anime_id = tag.get('href', None)
temp = tag.get('title', None)
if temp and temp.startswith("[HorribleSubs]") and temp.endswith("[720p].mkv"):
anime_id = re.findall("view/([0-9]+)", anime_id)[0]
# temp = re.findall("\[HorribleSubs\] (.*?) - ([0-9]*) \[720p\].mkv", temp)
titles.append((temp, anime_id))
print("Done!")
print("Anime retrieval complete!")
return titles
|
91160353c7b488f21fbc7ed0a50974193a4b45bf
| 32,114 |
def network_generator(
rw: int,
cl: int,
b: float,
xi: float,
P: float,
mu: float,
bipartite: bool,
) -> ArrayLike:
"""
function to generate synthetic networks with nested, modular and in-block nested structures. Generates
networks with a fixed block size and increasing number of blocks (hence, increasing network size),
instead of networks with fixed size. This benchmark is a modification of the one introduced by
    ASR et al (PRE 2018). If the number of column nodes is not given, the function will assume that we want
to generate a unipartite network. The parameters must be passed respecting the following order.
inputs:
----------
rw: int >1
number of row nodes that form a block
cl: int >1
number of col nodes that form a block
    b: number >=1
        number of blocks into which the main matrix will be divided
    xi: number, >=1
        shape parameter indicating how stylised the nested curve is
    P: number in [0, 1]
        parameter that controls the amount of noise outside a perfectly nested structure
    mu: number in [0, 1]
        parameter that controls the amount of noise outside the blocks
bipartite:
a boolean to indicate if you want to generate bipartite (True) or unipartite (False) networks
output:
----------
M: array
The synthetic network matrix with the predefined structure
example:
---------
    network_matrix = network_generator(rw, cl, b, xi, P, mu, bipartite)
"""
if rw < 3 or cl < 3:
raise ValueError("MATRIX TOO SMALL: row and col sizes should be larger than 3")
Mij = uniform(0, 1, size=(int(rw * b), int(cl * b)))
cy, cx = mod_param(int(rw * b), int(cl * b), b)
M_no = zeros(Mij.shape)
le = []
Pi = ((b - 1) * mu) / b
lb = 0
# for each block generate a nested structure
for ii in range(int(b)):
j, i = indices(M_no[cy * ii : cy * (ii + 1), cx * ii : cx * (ii + 1)].shape)
# heaviside function to produce the nested structure
H = ((j[::-1, :] + 1) / cy) > ballcurve((i / cx), xi)
M_no[cy * ii : cy * (ii + 1), cx * ii : cx * (ii + 1)] = H
le += [M_no[cy * ii : cy * (ii + 1), cx * ii : cx * (ii + 1)].sum()]
lb += cy * cx
# generate the nested structure of the remaining block
j, i = indices(M_no[(ii + 1) * cy :, (ii + 1) * cx :].shape)
# heaviside function to produce the nested structure
H = ((j[::-1, :] + 1) / j.shape[0]) > ballcurve((i / i.shape[1]), xi)
M_no[(ii + 1) * cy :, (ii + 1) * cx :] = H
le += [M_no[(ii + 1) * cy :, (ii + 1) * cx :].sum()]
lb += (int(rw * b) - ((ii + 1) * cy)) * (int(cl * b) - ((ii + 1) * cx))
Et = M_no.sum(dtype=int)
# prob of having a link outside blocks
p_inter = (mu * Et) / (lb * b) if ((lb * b) != 0) else 0
M_no[M_no == 0] = p_inter
for ix in range(int(b)):
j, i = indices(M_no[cy * ix : cy * (ix + 1), cx * ix : cx * (ix + 1)].shape)
Pr = (
(P * le[ix]) / ((cx * cy) - le[ix] + (P * le[ix]))
if ((cx * cy) - le[ix] + (P * le[ix])) != 0
else 0
)
# heaviside function to produce the nested structure
H = ((j[::-1, :] + 1) / cy) > ballcurve((i / cx), xi)
# prob of having a link within blocks
p_intra = ((1 - P + (P * Pr)) * H + Pr * (1 - H)) * (1 - Pi)
M_no[cy * ix : cy * (ix + 1), cx * ix : cx * (ix + 1)] = p_intra
# calculate to the remaining block
j, i = indices(M_no[(ix + 1) * cy :, (ix + 1) * cx :].shape)
Pr = (
(P * le[ix + 1])
/ (
((int(rw * b) - (ix + 1) * cy) * (int(cl * b) - (ix + 1) * cx))
- le[ix + 1]
+ (P * le[ix + 1])
)
if (le[ix + 1] > 0) & (P != 0)
else 0
)
# heaviside function to produce the nested structure
H = ((j[::-1, :] + 1) / j.shape[0]) > ballcurve((i / i.shape[1]), xi)
# prob of having a link within blocks
p_intra = ((1 - P + (P * Pr)) * H + Pr * (1 - H)) * (1 - Pi)
M_no[(ix + 1) * cy :, (ix + 1) * cx :] = p_intra
M = (M_no > Mij).astype(int)
if not bipartite:
fill_diagonal(M, 0)
M = triu(M, k=1) + (triu(M, k=1)).T
return M
|
f9c6d615b117a2aa7ee22b7615ddcdbcaf628a71
| 32,115 |
from typing import Tuple
def convert_rgb_to_hex(rgb: Tuple[int, int, int]) -> str:
"""Take an RGB value and convert it to hex code.
Args:
rgb: a color represented as rgb values.
Returns:
Hex code of color or None if the RGB code is invalid.
"""
# Validate user Input is not negative or greater than 255
for value in rgb:
if not 256 > value >= 0:
# Return nothing if any of the RGB values fail validation
return None
return "%02x%02x%02x" % rgb
|
c20c6a96dbe577eff4421df28403a57e1f038e4e
| 32,116 |
def localVarName(value, position):
"""A name of a class."""
if not value[0].islower():
return Error('BadLocalVariableName', 'Local variable must start with a lower case letter', position, LINES)
return None
|
d4f8838497109fcf41e9c904aaddc31edd69eadc
| 32,117 |
def nodeAndOutputFromScenegraphLocationString(string, dag):
"""
Returns a tuple containing the node defined in a location string and its
corresponding Output.
"""
try:
outputNodeUUID = uuidFromScenegraphLocationString(string)
outputNode = dag.node(nUUID=outputNodeUUID)
outputNodeOutputName = string.split(":")[3]
return (outputNode, outputNode.outputNamed(outputNodeOutputName))
    except Exception:
        return (None, None)
|
d7fa1f03b3121f4dbcc9baac8c94bb5113fbc378
| 32,118 |
def rivers_with_station(stations):
"""Returns the names of rivers on which a station is situated"""
return set(stations_by_river(stations).keys())
|
c2e78ff18c3fdc73f04e145beae77d914a0ee287
| 32,119 |
def check_string_is_nonempty(string, string_type='string'):
"""Ensures input is a string of non-zero length"""
if string is None or \
(not isinstance(string, str)) or \
len(string) < 1:
raise ValueError('name of the {} must not be empty!'
''.format(string_type))
return string
|
527e60b35f6a827ee9b1eae3c9a3f7abc596b7ff
| 32,120 |
def last_frame_with_txt(vid, txt, duration):
"""Take the last frame from vid, show it for duration with txt overlay."""
frame = list(vid.iter_frames())[-1]
clip = ImageClip(frame, duration=duration)
return CompositeVideoClip([
clip,
TextClip(txt, font=MOVIEPY_FONT, color='black', bg_color='white',
fontsize=40)
.set_pos((10, 10)).set_duration(duration)])
|
e7a62332cb4ae69addc12bc679eb30479432caf2
| 32,121 |
import torch
def load_dataset(dataset_size=100, dataset_start=0, shuffle=True, sentence_level=False, n_authors=15, k=5, features=u""):
"""
    Load the Reuters C50 dataset and build train/dev/test cross-validation data loaders.
    :return: (dataset, train_loader, dev_loader, test_loader)
"""
# Load from directory
if sentence_level:
reutersc50_dataset = torchlanguage.datasets.ReutersC50SentenceDataset(
n_authors=n_authors,
download=True,
dataset_size=dataset_size,
dataset_start=dataset_start
)
else:
reutersc50_dataset = torchlanguage.datasets.ReutersC50Dataset(
n_authors=n_authors,
download=True,
dataset_size=dataset_size,
dataset_start=dataset_start,
load_features=features
)
# end if
# Reuters C50 dataset training
reuters_loader_train = torch.utils.data.DataLoader(
torchlanguage.utils.CrossValidationWithDev(reutersc50_dataset, train='train', k=k),
batch_size=1,
shuffle=shuffle
)
# Reuters C50 dataset dev
reuters_loader_dev = torch.utils.data.DataLoader(
torchlanguage.utils.CrossValidationWithDev(reutersc50_dataset, train='dev', k=k),
batch_size=1,
shuffle=shuffle
)
# Reuters C50 dataset test
reuters_loader_test = torch.utils.data.DataLoader(
torchlanguage.utils.CrossValidationWithDev(reutersc50_dataset, train='test', k=k),
batch_size=1,
shuffle=shuffle
)
return reutersc50_dataset, reuters_loader_train, reuters_loader_dev, reuters_loader_test
|
2ace76a461699e9f0bdf7ca838d414a3c618898a
| 32,122 |
def df_as_table(dataframe, size='50'):
"""
    :param dataframe: pandas dataframe to be displayed as an HTML table
    :param size: string setting the relative table width in percent (default "50")
    :return: string containing an HTML table
"""
shape = dataframe.shape
n_cols = shape[1]
n_rows = shape[0]
headers = list(dataframe.columns)
sides = list(dataframe.index.values)
local_text = "<table style=\"width:"+size+"%\">"
if headers != []:
if sides != []:
headers.insert(0, "-")
local_text += "<tr>"
for element in headers:
local_text += "<th>"+element+"</th>"
local_text += "</tr>"
for i in range(n_rows):
local_text += "<tr>"
if sides != []:
local_text += "<th>" + str(sides[i]) + "</th>"
for j in range(n_cols):
local_text += "<td>" + str(dataframe.iloc[i][j]) + "</td>"
local_text += "</tr>"
local_text += "</table>"
return local_text
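# A minimal rendering example for the helper above, assuming pandas is installed.
import pandas as pd
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["x", "y"])
html = df_as_table(df, size="30")
print(html.startswith("<table"), "<th>a</th>" in html)  # True True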
|
3634a90b3e3d4ef5c8cc737e19a0540305528959
| 32,123 |
def ecdh(privkey, pubkey):
"""
Given a loaded private key and a loaded public key, perform an ECDH exchange
    :param privkey: loaded private key
    :param pubkey: loaded public key
    :return: the shared ECDH secret
"""
return ecdsa.ecdh(privkey, pubkey)
|
650607024f3fcd10fd7649897461c69a3d80596b
| 32,124 |
def portfolio_margin_account(self, **kwargs):
"""Get Portfolio Margin Account Info (USER_DATA)
GET /sapi/v1/portfolio/account
https://binance-docs.github.io/apidocs/spot/en/#get-portfolio-margin-account-info-user_data
Keyword Args:
recvWindow (int, optional): The value cannot be greater than 60000
"""
return self.sign_request("GET", "/sapi/v1/portfolio/account", {**kwargs})
|
88a1087d44187ed130211ab7d42fdcbb54a038f3
| 32,126 |
from typing import Any
from typing import Mapping
from dataclasses import is_dataclass
def init_hyperparams(*, class_name: str, hyperparams, hyperparams_class) -> Any:
"""
Construct a hyperparams object from either a mapping or another hyperparams object.
"""
if isinstance(hyperparams_class, type) and is_dataclass(hyperparams_class):
if hyperparams is None:
return hyperparams_class()
if isinstance(hyperparams, hyperparams_class):
return hyperparams
if isinstance(hyperparams, Mapping):
return hyperparams_class(**hyperparams)
raise WrongHyperparamsType(hyperparams=hyperparams, class_name=class_name)
raise YouForgotTheHyperparams(class_name=class_name)
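# A hedged usage sketch with an illustrative hyperparameter dataclass; only the success
# paths are exercised, so the custom error types above are never raised here.
from dataclasses import dataclass
@dataclass
class TrainerHP:
    lr: float = 1e-3
    batch_size: int = 32
hp = init_hyperparams(class_name="Trainer", hyperparams={"lr": 0.01}, hyperparams_class=TrainerHP)
assert hp.lr == 0.01 and hp.batch_size == 32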
|
2aa4ebc5ec9e6d4502f7873e6517dc5285f8604e
| 32,127 |
def _is_y(filename):
"""
Checks whether a file is a Nanometrics Y file or not.
:type filename: str
:param filename: Name of the Nanometrics Y file to be checked.
:rtype: bool
:return: ``True`` if a Nanometrics Y file.
.. rubric:: Example
>>> _is_y("/path/to/YAYT_BHZ_20021223.124800") #doctest: +SKIP
True
"""
try:
# get first tag (16 bytes)
with open(filename, 'rb') as fh:
_, tag_type, _, _ = _parse_tag(fh)
except Exception:
return False
# The first tag in a Y-file must be the TAG_Y_FILE tag (tag type 0)
if tag_type != 0:
return False
return True
|
adbb75533934d5050658b8a5078e66438b3381df
| 32,129 |
def is_scheduler_filter_enabled(filter_name):
"""Check the list of enabled compute scheduler filters from config. """
filters = CONF.compute_feature_enabled.scheduler_available_filters
if len(filters) == 0:
return False
if 'all' in filters:
return True
if filter_name in filters:
return True
return False
|
f40e99f49a49aa24e66de72bad82b87ccf6ae8a2
| 32,130 |
from m2py.numerical.roots import nraphson
def juros_price(PV, PMT, n, PV0=0):
"""
    Computes the interest rate of an installment plan under the Price table (constant-installment) method.
    Commonly used for credit extended to consumers.
    :param PV: cash price / present value
    :param PV0: down payment
    :param PMT: installment amount
    :param n: number of installments
    :return: decimal interest rate used in the installment plan
"""
c = (PV - PV0) / PMT
f = lambda i: (1 - 1 / (1 + i) ** n) / i - c
df = lambda i: ((i + 1) ** -n - 1 * n) / i - (1 - 1 / (i + 1) ** n) / i ** 2
root, _, _ = nraphson(f, df, 2, tol=1e-5, maxit=1000)
return round(root, 5)
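# Independent cross-check of the same annuity equation, using plain bisection instead of
# m2py's Newton-Raphson; the values below are illustrative (12 installments of 100 on a
# cash price of 1000).
def _rate_bisect(PV, PMT, n, PV0=0, lo=1e-6, hi=1.0):
    f = lambda i: (1 - (1 + i) ** -n) / i - (PV - PV0) / PMT
    for _ in range(200):
        mid = (lo + hi) / 2
        if f(lo) * f(mid) <= 0:
            hi = mid
        else:
            lo = mid
    return round((lo + hi) / 2, 5)
print(_rate_bisect(1000, 100, 12))  # ~0.02923 per period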
|
d593f27616c7028b39e80dff47a446a40fe43338
| 32,131 |
import json
import re
def delexicalisation(out_src, out_trg, category, properties_objects):
"""
Perform delexicalisation.
:param out_src: source string
:param out_trg: target string
:param category: DBPedia category
:param properties_objects: dictionary mapping properties to objects
:return: delexicalised strings of the source and target; dictionary containing mappings of the replacements made
"""
with open('delex_dict.json') as data_file:
data = json.load(data_file)
# replace all occurrences of Alan_Bean to ASTRONAUT in input
delex_subj = data[category]
delex_src = out_src
delex_trg = out_trg
# for each instance, we save the mappings between nondelex and delex
replcments = {}
for subject in delex_subj:
        clean_subj = ' '.join(re.split(r'(\W)', subject.replace('_', ' ')))
if clean_subj in out_src:
delex_src = out_src.replace(clean_subj + ' ', category.upper() + ' ')
replcments[category.upper()] = ' '.join(clean_subj.split()) # remove redundant spaces
if clean_subj in out_trg:
delex_trg = out_trg.replace(clean_subj + ' ', category.upper() + ' ')
replcments[category.upper()] = ' '.join(clean_subj.split())
# replace all occurrences of objects by PROPERTY in input
for pro, obj in sorted(properties_objects.items()):
obj_clean = ' '.join(re.split('(\W)', obj.replace('_', ' ').replace('"', '')))
if obj_clean in delex_src:
delex_src = delex_src.replace(obj_clean + ' ', pro.upper() + ' ')
replcments[pro.upper()] = ' '.join(obj_clean.split()) # remove redundant spaces
if obj_clean in delex_trg:
delex_trg = delex_trg.replace(obj_clean + ' ', pro.upper() + ' ')
replcments[pro.upper()] = ' '.join(obj_clean.split())
# possible enhancement for delexicalisation:
# do delex triple by triple
# now building | location | New_York_City New_York_City | isPartOf | New_York
# is converted to
# BUILDING location ISPARTOF City ISPARTOF City isPartOf ISPARTOF
return delex_src, delex_trg, replcments
|
55108ff40e8739571a99a3481221c82c0fcbf255
| 32,132 |
def create_attention_mask_from_input_mask_v1(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = bert_utils.get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = bert_utils.get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask_boradcast = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
    # Unlike the usual helper, this variant does mask attention *from* padding tokens:
    # instead of a tensor of all ones it broadcasts `to_mask` itself, so padded
    # positions are zeroed on both sides of the product below.
    #
    # `broadcast_ones` = [batch_size, to_seq_length, 1]
    broadcast_ones = tf.cast(tf.expand_dims(to_mask, -1), tf.float32)
    # The original ones-based version would be:
    # tf.ones(shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask_boradcast
return mask
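# A minimal, self-contained illustration of the broadcasting step above (tensorflow assumed
# to be in scope as tf; bert_utils is not needed for this): a [batch, seq] padding mask
# becomes a [batch, seq, seq] attention mask.
to_mask_demo = tf.constant([[1, 1, 1, 0]])                     # one sequence, last token padded
rows = tf.cast(tf.expand_dims(to_mask_demo, -1), tf.float32)   # [1, 4, 1]
cols = tf.cast(tf.expand_dims(to_mask_demo, 1), tf.float32)    # [1, 1, 4]
print((rows * cols).shape)                                     # (1, 4, 4); row/col 3 are all zeros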
|
0f83dd4a2e5cf904f19f89ce5b42e562c4ba401e
| 32,133 |
import hashlib
def md5(filename):
"""Hash function for files to be uploaded to Fl33t"""
md5hash = hashlib.md5()
with open(filename, "rb") as filehandle:
for chunk in iter(lambda: filehandle.read(4096), b""):
md5hash.update(chunk)
return md5hash.hexdigest()
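# Round-trip check: hash a small temporary file and compare with hashlib over the full bytes.
import tempfile
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello fl33t")
    tmp_path = tmp.name
assert md5(tmp_path) == hashlib.md5(b"hello fl33t").hexdigest()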
|
35068abafee2c5c4b1ac672f603b0e720a8c9a8c
| 32,134 |
from typing import Iterable
from typing import Any
from typing import Tuple
from itertools import tee
def pairwise(iterable: Iterable[Any]) -> Iterable[Tuple[Any, Any]]:
""" Divide the given iter into pairs and return as tuple pairs.
s -> (s0,s1), (s1,s2), (s2, s3), ...
"""
a, b = tee(iterable)
next(b, None)
return zip(a, b)
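# Quick checks of the pairwise helper above.
assert list(pairwise([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]
assert list(pairwise("ab")) == [("a", "b")]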
|
c9867a51d238ee51a993465b1757b387d0c9be6a
| 32,135 |