content (stringlengths 35-762k) | sha1 (stringlengths 40-40) | id (int64 0-3.66M)
---|---|---|
def station_suffix(station_type):
""" Simple switch, map specific types on to single letter. """
suffix = ' (No Dock)'
if 'Planetary' in station_type and station_type != 'Planetary Settlement':
suffix = ' (P)'
elif 'Starport' in station_type:
suffix = ' (L)'
elif 'Asteroid' in station_type:
suffix = ' (AB)'
elif 'Outpost' in station_type:
suffix = ' (M)'
elif 'Carrier' in station_type:
suffix = ' (C)'
return suffix | c28c4d3f0da8401ffc0721a984ec2b2e2cd50b24 | 13,200 |
def temperatures_equal(t1, t2):
"""Handle 'off' reported as 126.5, but must be set as 0."""
if t1 == settings.TEMPERATURE_OFF:
t1 = 0
if t2 == settings.TEMPERATURE_OFF:
t2 = 0
return t1 == t2 | 5c054d23317565e474f6c8d30b7d87c0b832fe9f | 13,201 |
import os
import numpy as np
import pylab
from scipy.interpolate import interp1d
from lmfit import Parameters, minimize
# 'fun' (the residual function) and 'fs' (font size) are assumed to be defined elsewhere in the module
def calc_cf(fname, standard='GC',thickness=1.0,plot=False,xmin=None,xmax=None,interpolation_type='linear'):
"""
Calculates the calibration factor by using different chosen standards like
fname : filename containing the experimental data done on standard sample
standard : 'GC' or 'Water' for (Glassy Carbon with thickness 0.1055cm from NIST) or (Water with the the known thickness as the samples)
thickness : Thickness of Standard sample in cm. It should be 0.1055 for NIST GC standard.
interpolation_type : 'linear','quadratic'or 'cubic'
plot : True or False for plotting or not plotting to view the goodness of fit
xmin,xmax : minimum and maximum Q-value between which the experimental data will be fitted with the standard data available
"""
if os.path.exists(fname):
if standard=='GC':
std_dat=np.loadtxt('./SetupData/glassy_carbon_saxs_std.txt')
elif standard=='Water':
qst=np.linspace(0.003,1.0,1000)
std_dat=np.vstack((qst,np.ones_like(qst)*1.68e-2)).T
tmp_dat=np.loadtxt(fname)
fh=open(fname,'r')
lines=fh.readlines()
for line in lines:
if line[0]=='#':
try:
header,value=line[1:].split('=')
if header=='Energy':
energy=float(value)
                except ValueError:
pass
#Checking the data for zeros
exp_dat=[]
for i in range(tmp_dat.shape[0]):
if tmp_dat[i,1]>1e-20:
exp_dat.append(tmp_dat[i,:])
exp_dat=np.array(exp_dat)
if xmin is None:
xmin=np.max([np.min(std_dat[:,0]),np.min(exp_dat[:,0])])
if xmax is None:
xmax=np.min([np.max(std_dat[:,0]),np.max(exp_dat[:,0])])
istdmin=np.argwhere(std_dat[:,0]>=xmin)[0][0]
istdmax=np.argwhere(std_dat[:,0]<=xmax)[-1][0]
expdmin=np.argwhere(exp_dat[:,0]>=xmin)[0][0]
expdmax=np.argwhere(exp_dat[:,0]<=xmax)[-1][0]
xmin=np.max([std_dat[istdmin,0],exp_dat[expdmin,0]])
xmax=np.min([std_dat[istdmax,0],exp_dat[expdmax,0]])
x=np.linspace(1.05*xmin,0.95*xmax,100)
istdf=interp1d(std_dat[:,0],std_dat[:,1],kind=interpolation_type)
expdf=interp1d(exp_dat[:,0],exp_dat[:,1],kind=interpolation_type)
param=Parameters()
param.add('cf',value=1.0,vary=True)
res=minimize(fun,param,args=(x,istdf,expdf,thickness))
cf=res.params['cf'].value
#print(cf,qoff)
#cf=np.mean(istdf(x)/expdf(x))
if plot:
pylab.loglog(std_dat[:,0],std_dat[:,1],'r-',lw=3,label='NIST std')
pylab.loglog(x,istdf(x)-res.residual,'g-',lw=3,label='15IDD data')
pylab.xlabel(u'Q, \u212B$^{-1}$',fontsize=fs)
pylab.ylabel(u'I, cm$^{-1}$',fontsize=fs)
pylab.legend(loc='best',prop={'size':fs*0.6})
pylab.xticks(fontsize=fs)
pylab.yticks(fontsize=fs)
pylab.tight_layout()
pylab.show()
return energy,cf,x,istdf(x)
else:
        print('%s does not exist!' % fname) | 68d6366d8b9d0be35509b0d4c0d65dc8c22d5668 | 13,202 |
import csv
def import_capitals_from_csv(path):
"""Imports a dictionary that maps country names to capital names.
@param string path: The path of the CSV file to import this data from.
@return dict: A dictionary of the format {"Germany": "Berlin", "Finland": "Helsinki", ...}
"""
capitals = {}
with open(path) as capitals_file:
reader = csv.reader(capitals_file)
for row in reader:
country, capital = row[0], row[1]
capitals[country] = capital
return capitals | 3c6a9c91df455cb8721371fe40b248fb7af8d866 | 13,203 |
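A short usage sketch for the row above, assuming import_capitals_from_csv is in scope; the file name and contents are made up for illustration.
import csv
# Write a tiny two-row CSV and read it back into a dict
with open('capitals_example.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerows([['Germany', 'Berlin'], ['Finland', 'Helsinki']])
print(import_capitals_from_csv('capitals_example.csv'))
# {'Germany': 'Berlin', 'Finland': 'Helsinki'}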
import os
import configparser
def read_config(config_file='config.ini'):
"""
Read the configuration file.
:param str config_file: Path to the configuration file.
    :return: configparser.ConfigParser object with the parsed configuration.
"""
if os.path.isfile(config_file) is False:
raise NameError(config_file, 'not found')
config = configparser.ConfigParser()
config.read(config_file)
return config | 0cafb2f280e30467e833d404ac86a8cea03f050a | 13,204 |
import numpy as np
def deg_to_qcm2(p, deg):
    """Return the center-of-momentum momentum transfer q squared, in MeV^2.
    Parameters
    ----------
    p : float
        relative momentum given in MeV.
    deg : number
        angle measure given in degrees
    """
return (p * np.sqrt( 2 * (1 - np.cos(np.radians(deg))) ))**(2) | 4b1840a8c672b443ac1954c0bde0a81a01338862 | 13,205 |
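A worked check of the formula in the row above, assuming deg_to_qcm2 and numpy are in scope: for elastic scattering, q^2 = 2*p^2*(1 - cos(theta)), so at 90 degrees it reduces to 2*p^2. The momentum value is illustrative.
import numpy as np
p_rel = 100.0  # relative momentum in MeV
q2 = deg_to_qcm2(p_rel, 90.0)
print(np.isclose(q2, 2 * p_rel**2))  # True: (100 * sqrt(2))**2 == 20000 MeV^2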
from django.conf import settings
from django.http import HttpResponseRedirect
def i18n(request):
"""
Set client language preference, lasts for one month
"""
next = request.META.get('HTTP_REFERER', None)
if not next:
next = settings.SITE_ROOT
lang = request.GET.get('lang', 'en')
res = HttpResponseRedirect(next)
res.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang, max_age=30*24*60*60)
return res | a56289c03b14b76719fc31bcee7e0e4e590f47ef | 13,206 |
import os
def replace_suffix(input_filepath, input_suffix, output_suffix, suffix_delimiter=None):
""" Replaces an input_suffix in a filename with an output_suffix. Can be used
to generate or remove suffixes by leaving one or the other option blank.
TODO: Make suffixes accept regexes. Can likely replace suffix_delimiter after this.
TODO: Decide whether suffixes should extend across multiple directory levels.
Parameters
----------
input_filepath: str
The filename to be transformed.
input_suffix: str
The suffix to be replaced
output_suffix: str
The suffix to replace with.
suffix_delimiter: str
Optional, overrides input_suffix. Replaces whatever
comes after suffix_delimiter with output_suffix.
Returns
-------
output_filepath: str
The transformed filename
"""
split_filename = nifti_splitext(input_filepath)
if suffix_delimiter is not None:
input_suffix = str.split(split_filename[0], suffix_delimiter)[-1]
if input_suffix not in os.path.basename(input_filepath):
        print('ERROR!', input_suffix, 'not in input_filepath.')
return []
else:
if input_suffix == '':
prefix = split_filename[0]
else:
prefix = input_suffix.join(str.split(split_filename[0], input_suffix)[0:-1])
prefix = prefix + output_suffix
output_filepath = prefix + split_filename[1]
return output_filepath | 6bc899c3ed2bcf86d085972ad6b2cd90f77269aa | 13,207 |
import random
def normal222(startt,endt,money2,first,second,third,forth,fifth,sixth,seventh,zz1,zz2,bb1,bb2,bb3,aa1,aa2):
"""
for source and destination id generation
"""
"""
for type of banking work,label of fraud and type of fraud
"""
idvariz=random.choice(bb3)
idgirande=random.choice(zz2)
first.append("transfer")
second.append(idvariz)
third.append(idgirande)
sixth.append("0")
seventh.append("none")
"""
for amount of money generation
"""
numberofmoney=random.randrange(50000,money2)
forth.append(numberofmoney)
"""
for date and time generation randomly between two dates
"""
final=randomDate(startt,endt, random.random())
fifth.append(final)
return (first,second,third,forth,fifth,sixth,seventh) | 5d0a4cfbd5e7ef3223cc67c4759967e86c4081c8 | 13,208 |
def _convert_to_coreml(tf_model_path, mlmodel_path, input_name_shape_dict,
output_names):
""" Convert and return the coreml model from the Tensorflow
"""
model = tf_converter.convert(tf_model_path=tf_model_path,
mlmodel_path=mlmodel_path,
output_feature_names=output_names,
input_name_shape_dict=input_name_shape_dict)
return model | b7266a1afe0f03717a8e710f9099a0349cc5b085 | 13,209 |
import tensorflow as tf
def _tavella_randell_nonuniform_grid(x_min, x_max, x_star, num_grid_points,
alpha, dtype):
"""Creates non-uniform grid clustered around a specified point.
Args:
x_min: A real `Tensor` of shape `(dim,)` specifying the lower limit of the
grid.
x_max: A real `Tensor` of same shape and dtype as `x_min` specifying the
upper limit of the grid.
x_star: A real `Tensor` of same shape and dtype as `x_min` specifying the
location on the grid around which higher grid density is desired.
num_grid_points: A scalar integer `Tensor` specifying the number of points
on the grid.
alpha: A scalar parameter which controls the degree of non-uniformity of the
grid. The smaller values of `alpha` correspond to greater degree of
clustering around `x_star`.
dtype: The default dtype to use when converting values to `Tensor`s.
Returns:
A real `Tensor` of shape `(dim, num_grid_points+1)` containing the
non-uniform grid.
"""
c1 = tf.math.asinh((x_min - x_star) / alpha)
c2 = tf.math.asinh((x_max - x_star) / alpha)
i = tf.expand_dims(tf.range(0, num_grid_points + 1, 1, dtype=dtype), axis=-1)
grid = x_star + alpha * tf.math.sinh(c2 * i / num_grid_points + c1 *
(1 - i / num_grid_points))
# reshape from (num_grid_points+1, dim) to (dim, num_grid_points+1)
return tf.transpose(grid) | 6c209871b3a8aba291b00a513056d5e1ebe111f8 | 13,210 |
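A NumPy-only sketch of the same sinh-based clustering for a single dimension, useful for inspecting the construction outside TensorFlow; the parameter values below are illustrative, not taken from the original code.
import numpy as np
def tavella_randell_grid_1d(x_min, x_max, x_star, num_grid_points, alpha):
    # Same construction as the row above: points accumulate near x_star,
    # and the endpoints map exactly onto x_min and x_max.
    c1 = np.arcsinh((x_min - x_star) / alpha)
    c2 = np.arcsinh((x_max - x_star) / alpha)
    t = np.arange(num_grid_points + 1, dtype=float) / num_grid_points
    return x_star + alpha * np.sinh(c2 * t + c1 * (1 - t))
grid = tavella_randell_grid_1d(0.0, 10.0, 3.0, 20, alpha=0.5)
print(np.allclose([grid[0], grid[-1]], [0.0, 10.0]))  # True: endpoints are preserved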
import functools
import logging
def node(func):
"""Decorator for functions which should get currentIndex node if no arg is passed"""
@functools.wraps(func)
def node_wrapper(self, *a, **k):
n = False
keyword = True
# Get node from named parameter
if 'node' in k:
n = k['node']
# Or from the first unnamed argument
elif len(a) >= 1:
n = a[0]
keyword = False
# If node was not specified, get from currentIndex
if n in (None, False):
n = self.model().data(self.currentIndex(), role=QtUserRole)
            logging.debug('@node not specified: got selected %s', n)
elif isDataset(n):
n = docname(n)
            logging.debug('@node was a dataset: found path %s', n)
# If node was expressed as/converted to string, get its corresponding
# tree entry
        if isinstance(n, str):
            logging.debug('traversing node %s', n)
n = str(n)
n = self.model().tree.traverse(n)
if keyword:
k['node'] = n
else:
a = list(a)
a[0] = n
a = tuple(a)
        logging.debug('@node returning %s (%s)', n, type(n))
return func(self, *a, **k)
return node_wrapper | d86a6a9e15314905cacc184f5939399f7b481f49 | 13,211 |
import uuid
def tourme_details():
""" Display Guides loan-details """
return render_template('tourme_details.html', id=str(uuid.uuid4())) | e332546257670bd26d08be8baf4122f93feab170 | 13,212 |
import json
def gen_dot_ok(notebook_path, endpoint):
"""
Generates .ok file and return its name
Args:
notebook_path (``pathlib.Path``): the path to the notebook
endpoint (``str``): an endpoint specification for https://okpy.org
Returns:
``str``: the name of the .ok file
"""
assert notebook_path.suffix == '.ipynb', notebook_path
ok_path = notebook_path.with_suffix('.ok')
name = notebook_path.stem
src = [notebook_path.name]
with open(ok_path, 'w') as out:
json.dump({
"name": name,
"endpoint": endpoint,
"src": src,
"tests": {
"tests/q*.py": "ok_test"
},
"protocols": [
"file_contents",
"grading",
"backup"
]
}, out)
return ok_path.name | 850827a3da476cc64bd50c40c2504d9765b25dfe | 13,213 |
import numpy as np
import pandas as pd
def sarig_methods_wide(
df: pd.DataFrame, sample_id: str, element_id: str,
) -> pd.DataFrame:
"""Create a corresponding methods table to match the pivoted wide form data.
.. note::
This requires the input dataframe to already have had methods mapping applied
by running ``pygeochemtools.geochem.create_dataset.add_sarig_chem_method``
function.
Args:
df (pd.DataFrame): Dataframe containing long form data.
sample_id (str): Name of column containing sample ID's.
element_id (str): Name of column containing geochemical element names.
Returns:
pd.DataFrame: Dataframe with mapped geochemical methods converted to wide form
with one method per sample.
"""
# grab duplicate values
duplicate_df = df[df.duplicated(subset=[sample_id, element_id], keep="last")]
df = df.drop_duplicates(subset=[sample_id, element_id])
method_code = (
df.pivot(index=[sample_id], columns=element_id, values=["CHEM_METHOD_CODE"],)
.add_suffix("_METHOD_CODE")
.droplevel(0, axis=1)
)
determination = (
df.pivot(index=[sample_id], columns=element_id, values=["DETERMINATION"],)
.add_suffix("_DETERMINATION")
.droplevel(0, axis=1)
)
digestion = (
df.pivot(index=[sample_id], columns=element_id, values=["DIGESTION"],)
.add_suffix("_DIGESTION")
.droplevel(0, axis=1)
)
fusion = (
df.pivot(index=[sample_id], columns=element_id, values=["FUSION"],)
.add_suffix("_FUSION")
.droplevel(0, axis=1)
)
assert (
method_code.columns.size
== determination.columns.size # noqa: W503
== digestion.columns.size # noqa: W503
== fusion.columns.size # noqa: W503
), "pivoted column lengths aren't equal"
c = np.empty(
(
method_code.columns.size
+ determination.columns.size # noqa: W503
+ digestion.columns.size # noqa: W503
+ fusion.columns.size, # noqa: W503
),
dtype=object,
)
c[0::4], c[1::4], c[2::4], c[3::4] = (
method_code.columns,
determination.columns,
digestion.columns,
fusion.columns,
)
df_wide = pd.concat([method_code, determination, digestion, fusion], axis=1)[c]
if not duplicate_df.empty:
try:
dup_method_code = (
duplicate_df.pivot(
index=[sample_id], columns=element_id, values=["CHEM_METHOD_CODE"],
)
.add_suffix("_METHOD_CODE")
.droplevel(0, axis=1)
)
dup_determination = (
duplicate_df.pivot(
index=[sample_id], columns=element_id, values=["DETERMINATION"],
)
.add_suffix("_DETERMINATION")
.droplevel(0, axis=1)
)
dup_digestion = (
duplicate_df.pivot(
index=[sample_id], columns=element_id, values=["DIGESTION"],
)
.add_suffix("_DIGESTION")
.droplevel(0, axis=1)
)
dup_fusion = (
duplicate_df.pivot(
index=[sample_id], columns=element_id, values=["FUSION"],
)
.add_suffix("_FUSION")
.droplevel(0, axis=1)
)
except ValueError as e:
print(
"There were duplicate duplicates in the method list. \
So no duplicates have been included in the output",
e,
)
else:
assert (
dup_method_code.columns.size
== dup_determination.columns.size # noqa: W503
== dup_digestion.columns.size # noqa: W503
== dup_fusion.columns.size # noqa: W503
), "pivoted column lengths aren't equal"
d = np.empty(
(
dup_method_code.columns.size
+ dup_determination.columns.size # noqa: W503
+ dup_digestion.columns.size # noqa: W503
+ dup_fusion.columns.size, # noqa: W503
),
dtype=object,
)
d[0::4], d[1::4], d[2::4], d[3::4] = (
dup_method_code.columns,
dup_determination.columns,
dup_digestion.columns,
dup_fusion.columns,
)
dup_df_wide = pd.concat(
[dup_method_code, dup_determination, dup_digestion, dup_fusion], axis=1
)[d]
df_wide = df_wide.append(dup_df_wide).sort_values(by=sample_id)
return df_wide | 95d969213de0702f3a6666e8e13ae2d37e404e3a | 13,214 |
from rdkit import Chem
from rdkit.Chem import ChemicalForceFields
import rmsd
# get_angles, compare_positions and align are assumed to be defined elsewhere in the module
def scan_torsion(resolution, unique_conformers=[]):
    """
    Scan the pentane backbone torsion at the given angular resolution and
    collect the unique conformers found along the scan.
    """
# Load the molecule
sdf_filename = "sdf/pentane.sdf"
suppl = Chem.SDMolSupplier(sdf_filename, removeHs=False, sanitize=True)
mol = next(suppl)
# Get molecule information
# n_atoms = mol.GetNumAtoms()
atoms = mol.GetAtoms()
# atoms = [atom.GetAtomicNum() for atom in atoms]
atoms = [atom.GetSymbol() for atom in atoms]
# Origin
conformer = mol.GetConformer()
origin = conformer.GetPositions()
origin -= rmsd.centroid(origin)
# Dihedral angle
a = 0
b = 1
c = 5
d = 8
# Origin angle
origin_angle = Chem.rdMolTransforms.GetDihedralDeg(conformer, a, b, c, d)
# Setup forcefield
mp = ChemicalForceFields.MMFFGetMoleculeProperties(mol)
ff = ChemicalForceFields.MMFFGetMoleculeForceField(mol, mp)
# Define all angles to scan
angles = get_angles(resolution)
debug_file = "test.xyz"
debug_file = open(debug_file, 'a+')
if len(unique_conformers) == 0:
xyz = rmsd.calculate_rmsd.set_coordinates(atoms, origin)
debug_file.write(xyz)
debug_file.write("\n")
unique_conformers = [origin]
for angle in angles:
# Reset position
for i, pos in enumerate(origin):
conformer.SetAtomPosition(i, pos)
# Set clockwork angle
Chem.rdMolTransforms.SetDihedralDeg(conformer, a, b, c, d, origin_angle + angle)
# Setup constrained ff
ffc = ChemicalForceFields.MMFFGetMoleculeForceField(mol, mp)
ffc.MMFFAddTorsionConstraint(a, b, c, d, False,
origin_angle+angle, origin_angle + angle, 1.0e10)
ffc.Minimize(maxIts=1000, energyTol=1e-2, forceTol=1e-3)
# angle1 = Chem.rdMolTransforms.GetDihedralDeg(conformer, a, b, c, d)
ff.Minimize(maxIts=1000, energyTol=1e-2, forceTol=1e-4)
# angle2 = Chem.rdMolTransforms.GetDihedralDeg(conformer, a, b, c, d)
pos = conformer.GetPositions()
pos -= rmsd.centroid(pos)
print("debug", len(unique_conformers))
unique = compare_positions(pos, unique_conformers)
if not unique:
continue
pos = align(pos, origin)
unique_conformers.append(pos)
xyz = rmsd.calculate_rmsd.set_coordinates(atoms, pos)
debug_file.write(xyz)
debug_file.write("\n")
print(angle, unique)
debug_file.close()
return unique_conformers | 8dfd6e0443ef83361aa993be4e9613435f78bf14 | 13,215 |
def vflip_box(box: TensorOrArray, image_center: TensorOrArray) -> TensorOrArray:
"""Flip boxes vertically, which are specified by their (cx, cy, w, h) norm
coordinates.
Reference:
https://blog.paperspace.com/data-augmentation-for-bounding-boxes/
Args:
box (TensorOrArray[B, 4]):
Boxes to be flipped.
image_center (TensorOrArray[4]):
Center of the image.
Returns:
box (TensorOrArray[B, 4]):
Flipped boxes.
"""
box[:, [1, 3]] += 2 * (image_center[[0, 2]] - box[:, [1, 3]])
box_h = abs(box[:, 1] - box[:, 3])
box[:, 1] -= box_h
box[:, 3] += box_h
return box | 99128e7b6d928c1b58457fc6d51b971c109cc77e | 13,216 |
import pathlib
def example_data():
"""Example data setup"""
tdata = (
pathlib.Path(__file__).parent.absolute() / "data" / "ident-example-support.txt"
)
return tdata | a8c9a88f8850fecc7cc05fb8c9c18e03778f3365 | 13,217 |
def add_ending_slash(directory: str) -> str:
"""add_ending_slash function
Args:
directory (str): directory that you want to add ending slash
Returns:
str: directory name with slash at the end
Examples:
>>> add_ending_slash("./data")
"./data/"
"""
if directory[-1] != "/":
directory = directory + "/"
return directory | 2062a55b59707dd48e5ae56d8d094c806d8a2c1d | 13,218 |
import numpy as np
import scipy.spatial.distance
def antenna_positions(Nx, Ny, Dmin):
"""
Generate antenna positions for a regular rectangular array, then return
baseline lengths.
- Nx, Ny : No. of antennas in x and y directions
- Dmin : Separation between neighbouring antennas
"""
# Generate antenna positions on a regular grid
x = np.arange(Nx) * Dmin
y = np.arange(Ny) * Dmin
xx, yy = np.meshgrid(x, y)
# Calculate baseline separations
xy = np.column_stack( (xx.flatten(), yy.flatten()) )
d = scipy.spatial.distance.pdist(xy)
return d | f47a0c887489987d8fc95205816459db55bbaa19 | 13,219 |
from typing import Union
from typing import List
import pandas as pd
def folds_to_list(folds: Union[list, str, pd.Series]) -> List[int]:
"""
This function formats string or either list of numbers
into a list of unique int
Args:
folds (Union[list, str, pd.Series]): Either list of numbers or
one string with numbers separated by commas or
pandas series
Returns:
List[int]: list of unique ints
Examples:
>>> folds_to_list("1,2,1,3,4,2,4,6")
[1, 2, 3, 4, 6]
>>> folds_to_list([1, 2, 3.0, 5])
[1, 2, 3, 5]
Raises:
ValueError: if value in string or array cannot be casted to int
"""
if isinstance(folds, str):
folds = folds.split(",")
elif isinstance(folds, pd.Series):
folds = list(sorted(folds.unique()))
return list({int(x) for x in folds}) | f499cce7992b77867fc3a7d95c8dd6efc83c3c79 | 13,220 |
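A quick usage sketch for the row above, assuming folds_to_list and pandas are importable; the inputs mirror the doctest examples plus a Series case.
import pandas as pd
print(folds_to_list("1,2,1,3,4,2,4,6"))     # [1, 2, 3, 4, 6]
print(folds_to_list([1, 2, 3.0, 5]))        # [1, 2, 3, 5]
print(folds_to_list(pd.Series([2, 2, 7])))  # [2, 7]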
import gc
def predict_all(x, model, config, spline):
"""
Predict full scene using average predictions.
Args:
x (numpy.array): image array
        model (tf h5): trained model used for prediction
config (Config):
spline (numpy.array):
Return:
prediction scene array average probabilities
----------
Example
----------
predict_all(x, model, config, spline)
"""
for i in range(8):
if i == 0: # reverse first dimension
x_seg = predict_windowing(
x[::-1, :, :], model, config, spline=spline
).transpose([2, 0, 1])
elif i == 1: # reverse second dimension
temp = predict_windowing(
x[:, ::-1, :], model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp[:, ::-1, :] + x_seg
elif i == 2: # transpose(interchange) first and second dimensions
temp = predict_windowing(
x.transpose([1, 0, 2]), model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp.transpose(0, 2, 1) + x_seg
gc.collect()
elif i == 3:
temp = predict_windowing(
np.rot90(x, 1), model, config, spline=spline
)
x_seg = np.rot90(temp, -1).transpose([2, 0, 1]) + x_seg
gc.collect()
elif i == 4:
temp = predict_windowing(
np.rot90(x, 2), model, config, spline=spline
)
x_seg = np.rot90(temp, -2).transpose([2, 0, 1]) + x_seg
elif i == 5:
temp = predict_windowing(
np.rot90(x, 3), model, config, spline=spline
)
x_seg = np.rot90(temp, -3).transpose(2, 0, 1) + x_seg
elif i == 6:
temp = predict_windowing(
x, model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp + x_seg
elif i == 7:
temp = predict_sliding(
x, model, config, spline=spline
).transpose([2, 0, 1])
x_seg = temp + x_seg
gc.collect()
del x, temp # delete arrays
x_seg /= 8.0
return x_seg.argmax(axis=0) | 6e5888b1d97c3a0924a67500c4b731fd00654cb2 | 13,221 |
def pre_arrange_cols(dataframe):
"""
DOCSTRING
:param dataframe:
:return:
"""
col_name = dataframe.columns.values[0]
dataframe.loc[-1] = col_name
dataframe.index = dataframe.index + 1
dataframe = dataframe.sort_index()
dataframe = dataframe.rename(index=str, columns={col_name: 'all'})
return dataframe | 522c0f4ca29b10d4a736d27f07d8e9dc80cafba5 | 13,222 |
import numpy
def wDot(x,y,h):
"""
Compute the parallel weighted dot product of vectors x and y using
weight vector h.
The weighted dot product is defined for a weight vector
:math:`\mathbf{h}` as
.. math::
(\mathbf{x},\mathbf{y})_h = \sum_{i} h_{i} x_{i} y_{i}
All weight vector components should be positive.
:param x,y,h: numpy arrays for vectors and weight
:return: the weighted dot product
"""
return globalSum(numpy.sum(x*y*h)) | e9bcc295517060f95004aec581055950358c9521 | 13,223 |
from functools import wraps
import pandas as pd
def dataframify(transform):
    """
    Decorator which transforms the output of scikit-learn feature normalizers from array to dataframe.
    Enables preservation of column names.
Args:
transform: (function), a scikit-learn feature selector that has a transform method
Returns:
new_transform: (function), an amended version of the transform method that returns a dataframe
"""
@wraps(transform)
def new_transform(self, df):
arr = transform(self, df.values)
return pd.DataFrame(arr, columns=df.columns, index=df.index)
return new_transform | cf21bb7aea90e742c83fb5e1abb41c5d01cddf4e | 13,224 |
def plot_map(self, map, update=False):
"""
map plotting
Parameters
----------
map : ndarray
map to plot
update : Bool
updating the map or plotting from scratch
"""
if update:
empty=np.empty(np.shape(self.diagnostics[self.diagnostic]))
empty[:]=np.nan
self.map.set_data(empty)
return self.map_window.imshow(map, origin='lower', interpolation='nearest',cmap=self.cmap, vmin=self.vmin, vmax=self.vmax)
else:
return self.map_window.imshow(map, origin='lower', interpolation='nearest',cmap=self.cmap, vmin=self.vmin, vmax=self.vmax) | a4effd8c7e958b694f2c01afdaa861455c855a0b | 13,225 |
def definition_activate(connection, args):
"""Activate Business Service Definition"""
activator = sap.cli.wb.ObjectActivationWorker()
activated_items = ((name, sap.adt.ServiceDefinition(connection, name)) for name in args.name)
return sap.cli.object.activate_object_list(activator, activated_items, count=len(args.name)) | 61dd5de0d8e24339c67363e904628390cc79b1da | 13,226 |
import re
def extractCompositeFigureStrings(latexString):
"""
Returns a list of latex figures as strings stripping out captions.
"""
# extract figures
figureStrings = re.findall(r"\\begin{figure}.*?\\end{figure}", latexString, re.S)
# filter composite figures only and remove captions (preserving captions in subfigures)
figureStrings = [
re.findall(r"\\begin{figure}.*(?=\n.*\\caption)", figureString, re.S)[0] + "\n\\end{figure}"
for figureString in figureStrings if "\\begin{subfigure}" in figureString
]
return figureStrings | 83a80c91890d13a6a0247745835e1ffb97d579f7 | 13,227 |
import time
from shapely.geometry import Polygon
# 'ox' (an osmnx-style module) and 'log' are assumed to be provided by the surrounding project
def osm_net_download(
polygon=None,
north=None,
south=None,
east=None,
west=None,
network_type="all_private",
timeout=180,
memory=None,
date="",
max_query_area_size=50 * 1000 * 50 * 1000,
infrastructure='way["highway"]',
):
"""
Download OSM ways and nodes within some bounding box from the Overpass API.
Parameters
----------
polygon : shapely Polygon or MultiPolygon
geographic shape to fetch the street network within
north : float
northern latitude of bounding box
south : float
southern latitude of bounding box
east : float
eastern longitude of bounding box
west : float
western longitude of bounding box
network_type : string
{'walk', 'bike', 'drive', 'drive_service', 'all', 'all_private'} what
type of street network to get
timeout : int
the timeout interval for requests and to pass to API
memory : int
server memory allocation size for the query, in bytes. If none, server
will use its default allocation size
date : string
query the database at a certain timestamp
max_query_area_size : float
max area for any part of the geometry, in the units the geometry is in:
any polygon bigger will get divided up for multiple queries to API
(default is 50,000 * 50,000 units [ie, 50km x 50km in area, if units are
meters])
infrastructure : string
download infrastructure of given type. default is streets, ie,
'way["highway"]') but other infrastructures may be selected like power
grids, ie, 'way["power"~"line"]'
Returns
-------
response_jsons : list
"""
# check if we're querying by polygon or by bounding box based on which
# argument(s) where passed into this function
by_poly = polygon is not None
by_bbox = not (
north is None or south is None or east is None or west is None
)
if not (by_poly or by_bbox):
raise ValueError(
"You must pass a polygon or north, south, east, and west"
)
# create a filter to exclude certain kinds of ways based on the requested
# network_type
osm_filter = ox.get_osm_filter(network_type)
response_jsons = []
# pass server memory allocation in bytes for the query to the API
# if None, pass nothing so the server will use its default allocation size
# otherwise, define the query's maxsize parameter value as whatever the
# caller passed in
if memory is None:
maxsize = ""
else:
maxsize = "[maxsize:{}]".format(memory)
# define the query to send the API
# specifying way["highway"] means that all ways returned must have a highway
# key. the {filters} then remove ways by key/value. the '>' makes it recurse
# so we get ways and way nodes. maxsize is in bytes.
if by_bbox:
# turn bbox into a polygon and project to local UTM
polygon = Polygon(
[(west, south), (east, south), (east, north), (west, north)]
)
geometry_proj, crs_proj = ox.project_geometry(polygon)
# subdivide it if it exceeds the max area size (in meters), then project
# back to lat-long
geometry_proj_consolidated_subdivided = ox.consolidate_subdivide_geometry(
geometry_proj, max_query_area_size=max_query_area_size
)
geometry, _ = ox.project_geometry(
geometry_proj_consolidated_subdivided,
crs=crs_proj,
to_latlong=True,
)
log(
"Requesting network data within bounding box from API in {:,} request(s)".format(
len(geometry)
)
)
start_time = time.time()
# loop through each polygon rectangle in the geometry (there will only
# be one if original bbox didn't exceed max area size)
for poly in geometry:
# represent bbox as south,west,north,east and round lat-longs to 8
# decimal places (ie, within 1 mm) so URL strings aren't different
# due to float rounding issues (for consistent caching)
west, south, east, north = poly.bounds
query_template = (
date
+ "[out:json][timeout:{timeout}]{maxsize};"
+ "({infrastructure}{filters}"
+ "({south:.8f},{west:.8f},{north:.8f},{east:.8f});>;);out;"
)
query_str = query_template.format(
north=north,
south=south,
east=east,
west=west,
infrastructure=infrastructure,
filters=osm_filter,
timeout=timeout,
maxsize=maxsize,
)
response_json = ox.overpass_request(
data={"data": query_str}, timeout=timeout
)
response_jsons.append(response_json)
log(
"Got all network data within bounding box from API in {:,} request(s) and {:,.2f} seconds".format(
len(geometry), time.time() - start_time
)
)
elif by_poly:
# project to utm, divide polygon up into sub-polygons if area exceeds a
# max size (in meters), project back to lat-long, then get a list of
# polygon(s) exterior coordinates
geometry_proj, crs_proj = ox.project_geometry(polygon)
geometry_proj_consolidated_subdivided = ox.consolidate_subdivide_geometry(
geometry_proj, max_query_area_size=max_query_area_size
)
geometry, _ = ox.project_geometry(
geometry_proj_consolidated_subdivided,
crs=crs_proj,
to_latlong=True,
)
polygon_coord_strs = ox.get_polygons_coordinates(geometry)
log(
"Requesting network data within polygon from API in {:,} request(s)".format(
len(polygon_coord_strs)
)
)
start_time = time.time()
# pass each polygon exterior coordinates in the list to the API, one at
# a time
for polygon_coord_str in polygon_coord_strs:
query_template = (
date
+ '[out:json][timeout:{timeout}]{maxsize};({infrastructure}{filters}(poly:"{polygon}");>;);out;'
)
query_str = query_template.format(
polygon=polygon_coord_str,
infrastructure=infrastructure,
filters=osm_filter,
timeout=timeout,
maxsize=maxsize,
)
response_json = ox.overpass_request(
data={"data": query_str}, timeout=timeout
)
response_jsons.append(response_json)
log(
"Got all network data within polygon from API in {:,} request(s) and {:,.2f} seconds".format(
len(polygon_coord_strs), time.time() - start_time
)
)
return response_jsons | faf949e015c365c3822131634f55c73e2e9fef0c | 13,228 |
import os
import logging
def _find_results_files(source_path: str, search_depth: int = 2) -> list:
"""Looks for results.json files in the path specified
Arguments:
source_path: the path to use when looking for result files
search_depth: the maximum folder depth to search
Return:
Returns a list containing found files
Notes:
A search depth of less than 2 will not recurse into sub-folders; a search depth of 2 will only recurse into
immediate sub-folders and no deeper; a search depth of 3 will recurse into the sub-folders of sub-folders; and
so on
"""
res_name = 'results.json'
res_name_len = len(res_name)
# Common expression declared once outside of recursion
# Checks that the file name matches exactly the testing string (res_name)
name_check_passes = lambda name: name.endswith(res_name) and os.path.isdir(name[:-res_name_len])
if not source_path:
return []
# Declare embedded function to do the work
def perform_recursive_find(path: str, depth: int) -> list:
"""Recursively finds results files
Arguments:
path: the path to check for results files
depth: the maximum folder depth to recurse (starting at 1)
Return:
Returns a list of found files
"""
return_list = []
# Basic checks
if os.path.isfile(path):
if name_check_passes(path):
logging.debug("Result file check specified result file: '%s'", path)
return [path]
logging.debug("Result file check name is not valid: '%s'", path)
return return_list
# We only process folders after the above checks
if not os.path.isdir(path):
logging.debug("Error: result file check path is not a file or folder: '%s'", path)
return return_list
# Loop over current folder looking for other folders and for a results file
for one_name in os.listdir(path):
check_name = os.path.join(path, one_name)
if name_check_passes(check_name):
logging.debug("Found result file: '%s'", check_name)
return_list.append(check_name)
elif depth > 1 and os.path.isdir(check_name):
logging.debug("Searching folder for result files: '%s'", check_name)
found_results = perform_recursive_find(check_name, depth - 1)
if found_results:
return_list.extend(found_results)
return return_list
# Find those files!
return perform_recursive_find(source_path, search_depth) | 1116ff41301754e79a43a0d25462eddeee3bafa3 | 13,229 |
from sympy import S, Add, Integer, Float, Integral
# LoadRubiReplacer, rubi_exp, sym_exp and the other rubi helpers are assumed to be in scope
def rubi_integrate(expr, var, showsteps=False):
"""
Rule based algorithm for integration. Integrates the expression by applying
transformation rules to the expression.
Returns `Integrate` if an expression cannot be integrated.
Parameters
==========
expr : integrand expression
var : variable of integration
Returns Integral object if unable to integrate.
"""
rubi = LoadRubiReplacer().load()
expr = expr.replace(sym_exp, rubi_exp)
expr = process_trig(expr)
expr = rubi_powsimp(expr)
if isinstance(expr, (int, Integer, float, Float)):
return S(expr)*var
if isinstance(expr, Add):
results = 0
for ex in expr.args:
results += rubi.replace(Integral(ex, var))
return process_final_integral(results)
results = util_rubi_integrate(Integral(expr, var))
return process_final_integral(results) | 25eccde81fe0425fbf35c522b24e79195684f537 | 13,230 |
import pandas as pd
def daily_price_read(sheet_name):
    """
    Read daily stock price rows (stock names and stock codes) from the database table.
    :param sheet_name: name of the table to read from
    :return: DataFrame with formatted trade_date and exchange-suffixed code columns
    """
sql = "SELECT * FROM public.%s limit 50000" % sheet_name
resultdf = pd.read_sql(sql, engine_postgre)
resultdf['trade_date'] = resultdf['trade_date'].apply(lambda x: x.strftime('%Y-%m-%d'))
resultdf['code'] = resultdf[['code', 'exchangeCD']].apply(lambda x: str(x[0]).zfill(6) + '.'+x[1], axis=1)
return resultdf | d68c326604c21b6375e77fb012a9776d87be617f | 13,231 |
def _isfloat(string):
"""
Checks if a string can be converted into a float.
Parameters
----------
    string : str
Returns
-------
bool:
True/False if the string can/can not be converted into a float.
"""
try:
float(string)
return True
except ValueError:
return False | 74ae50761852d8b22ac86f6b6332bd70e42bf623 | 13,232 |
def get(url, **kwargs):
"""
get json data from API
:param url:
:param kwargs:
:return:
"""
try:
result = _get(url, **kwargs)
except (rq.ConnectionError, rq.ReadTimeout):
result = {}
return result | a3c17ce6ab383373e7215dd0e5b3e63130739126 | 13,233 |
import numpy as np
def sample_indep(p, N, T, D):
"""Simulate an independent sampling mask."""
obs_ind = np.full((N, T, D), -1)
for n in range(N):
for t in range(T):
pi = np.random.binomial(n=1, p=p, size=D)
ind = np.where(pi == 1)[0]
count = ind.shape[0]
obs_ind[n, t, :count] = ind
return obs_ind | 69a469d45040ed1598f7d946b6706f70f23dc580 | 13,234 |
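A small usage sketch for the row above, assuming sample_indep and numpy are in scope; the shapes and probability are illustrative.
import numpy as np
np.random.seed(0)
mask = sample_indep(p=0.5, N=2, T=3, D=4)
print(mask.shape)   # (2, 3, 4): per (n, t), observed feature indices padded with -1
print(mask[0, 0])   # e.g. [ 1  3 -1 -1] -- indices drawn with probability 0.5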
import numpy as np
def _relabel_targets(y, s, ranks, n_relabels):
"""Compute relabelled targets based on predicted ranks."""
demote_ranks = set(sorted(ranks[(s == 0) & (y == 1)])[:n_relabels])
promote_ranks = set(sorted(ranks[(s == 1) & (y == 0)])[-n_relabels:])
return np.array([
_relabel(_y, _s, _r, promote_ranks, demote_ranks)
for _y, _s, _r in zip(y, s, ranks)]) | e8c06364a717210da6c0c60c883fee05d61ed3eb | 13,235 |
def exp(x):
"""Take exponetial of input x.
Parameters
----------
x : Expr
Input argument.
Returns
-------
y : Expr
The result.
"""
return call_pure_intrin(x.dtype, "exp", x) | 9ec31c0b9928108680c2818d1fe110d36c81b08d | 13,236 |
import numpy as np
def check_table(words_in_block, block_width, num_lines_in_block):
""" Check if a block is a block of tables or of text."""
# average_words_per_line=24
# total_num_words = 0
ratio_threshold = 0.50
actual_num_chars = 0
all_char_ws = []
cas = []
# total_num_words += len(line)
if num_lines_in_block > 0:
for word in words_in_block:
if word['word']:
actual_num_chars += len(word['word'])
char_w = float(word['r']-word['l'])/len(word['word'])
cas.append(round(char_w, 2))
all_char_ws.extend(cas)
average_char_width = np.mean(all_char_ws)
expected_num_chars = (float(block_width)/average_char_width)*num_lines_in_block
# expected_word_count = average_words_per_line*num_lines_in_block
ratio = actual_num_chars/expected_num_chars
if ratio < ratio_threshold:
return True
else: return False
else: return False | 46c785eefc7694a88d1814b9ca01f41fffa9d1f8 | 13,237 |
import numpy as np
def defaultSampleFunction(xy1, xy2):
"""
The sample function compares how similar two curves are.
If they are exactly the same it will return a value of zero.
The default function returns the average error between each sample point in two arrays of x/y points,
xy1 and xy2.
Parameters
----------
xy1 : array
The first input 2D x/y array of points.
xy2 : array
The second input 2D x/y array of points.
Returns
-------
float
The average "distance" between each point on the curve. The output quantity is unitless.
"""
x1 = xy1[:,0]
x2 = xy2[:,0]
y1 = xy1[:,1]
y2 = xy2[:,1]
diff = ((x1 - x2)**2 + (y1 - y2)**2)**(0.5)
return np.sum(diff)/len(x1) | 5654145e9a2d3701d289094ababc94d9ed972def | 13,238 |
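A brief usage sketch for the row above, assuming defaultSampleFunction and numpy are in scope; the two curves are made up for illustration.
import numpy as np
x = np.linspace(0.0, 1.0, 5)
xy1 = np.column_stack([x, np.zeros_like(x)])  # points on y = 0
xy2 = np.column_stack([x, np.ones_like(x)])   # same x values, shifted up by 1
print(defaultSampleFunction(xy1, xy2))  # 1.0 -- every point pair is exactly 1 apart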
from sympy import S, expand, latex
# newvar, addsymbols, fractioncancel, ssolve and shiro are assumed to be provided by the surrounding package
def makesubs(formula,intervals,values=None,variables=None,numden=False):
"""Generates a new formula which satisfies this condition:
for all positive variables new formula is nonnegative iff
for all variables in corresponding intervals old formula is nonnegative.
>>> newproof()
>>> makesubs('1-x^2','[0,1]')
Substitute $x\to 1 - \frac{1}{a + 1}$ #depend on shiro.display
(2*a + 1)/(a**2 + 2*a + 1)
>>> makesubs('1-x^2','[0,1]',values='1/2')
Substitute $x\to 1 - \frac{1}{b + 1}$ #depend on shiro.display
((2*b + 1)/(b**2 + 2*b + 1), [1])
>>> makesubs('1-x^2','[0,1]',values='1/2',numden=True)
Substitute $x\to 1 - \frac{1}{c + 1}$ #depend on shiro.display
(2*c + 1, c**2 + 2*c + 1, [1])
"""
formula=S(formula)
addsymbols(formula)
intervals=_smakeiterable2(intervals)
if variables: variables=_smakeiterable(variables)
else: variables=sorted(formula.free_symbols,key=str)
if values!=None:
values=_smakeiterable(values)
equations=[var-value for var,value in zip(variables,values)]
else:
equations=[]
newvars=[]
warn=0
usedvars=set()
for var,interval in zip(variables,intervals):
end1,end2=interval
z=newvar()
newvars+=[z]
usedvars|={var}
if (end1.free_symbols|end2.free_symbols)&usedvars:
warn=1
if end1 in {S('-oo'),S('oo')}:
end1,end2=end2,end1
if {end1,end2}=={S('-oo'),S('oo')}:
sub1=sub2=(z-1/z)
elif end2==S('oo'):
sub1=sub2=(end1+z)
elif end2==S('-oo'):
sub1=sub2=end1-z
else:
sub1=end2+(end1-end2)/z
sub2=end2+(end1-end2)/(1+z)
formula=formula.subs(var,sub1)
shiro.display(shiro.translation['Substitute']+" $"+latex(var)+'\\to '+latex(sub2)+'$')
equations=[equation.subs(var,sub1) for equation in equations]
num,den=fractioncancel(formula)
for var,interval in zip(newvars,intervals):
if {interval[0],interval[1]} & {S('oo'),S('-oo')}==set():
num=num.subs(var,var+1)
den=den.subs(var,var+1)
equations=[equation.subs(var,var+1) for equation in equations]
if values:
values=ssolve(equations,newvars)
if len(values):
values=values[0]
num,den=expand(num),expand(den)
#shiro.display(shiro.translation["Formula after substitution:"],"$$",latex(num/den),'$$')
if warn:
shiro.warning(shiro.translation[
'Warning: intervals contain backwards dependencies. Consider changing order of variables and intervals.'])
if values and numden:
return num,den,values
elif values:
return num/den,values
elif numden:
return num,den
else:
return num/den | 238923ac5e6e9e577f90091b61b6182323fdee75 | 13,239 |
def generateKey(accountSwitchKey=None,keytype=None):
""" Generate Key"""
genKeyEndpoint = '/config-media-live/v2/msl-origin/generate-key'
if accountSwitchKey:
params = {'accountSwitchKey': accountSwitchKey}
params["type"] = keytype
key = prdHttpCaller.getResult(genKeyEndpoint, params)
else:
        params = {'type': keytype}
key = prdHttpCaller.getResult(genKeyEndpoint,params)
return key | 5da825d809fbb03b5929bcc14f0da0451eaf639a | 13,240 |
def positions_count_for_one_ballot_item_doc_view(request):
"""
Show documentation about positionsCountForOneBallotItem
"""
url_root = WE_VOTE_SERVER_ROOT_URL
template_values = positions_count_for_one_ballot_item_doc.positions_count_for_one_ballot_item_doc_template_values(
url_root)
template_values['voter_api_device_id'] = get_voter_api_device_id(request)
return render(request, 'apis_v1/api_doc_page.html', template_values) | 3c56d186560fa8fae6117b6b656ee0c8d40a8728 | 13,241 |
def atcab_sign_base(mode, key_id, signature):
"""
Executes the Sign command, which generates a signature using the ECDSA algorithm.
Args:
mode Mode determines what the source of the message to be signed (int)
key_id Private key slot used to sign the message. (int)
signature Signature is returned here. Format is R and S integers in
big-endian format. 64 bytes for P256 curve (Expects bytearray)
Returns:
        Status code
"""
if not isinstance(signature, bytearray):
status = Status.ATCA_BAD_PARAM
else:
c_signature = create_string_buffer(64)
status = get_cryptoauthlib().atcab_sign_base(mode, key_id, byref(c_signature))
signature[0:] = bytes(c_signature.raw)
return status | 87c9770d6ba456947206ea1abb46b10a3f413811 | 13,242 |
from typing import List
import numpy as np
import pandas as pd
def load_gs(
gs_path: str,
src_species: str = None,
dst_species: str = None,
to_intersect: List[str] = None,
) -> dict:
"""Load the gene set file (.gs file).
Parameters
----------
gs_path : str
Path to the gene set file with the following two columns, separated by tab:
- 'TRAIT'
- 'GENESET':
(1) <gene1>,<gene2>,... each gene will be weighted uniformly or
(2) <gene1>:<weight1>,<gene2>:<weight2>,... each gene will be weighted by its weight.
src_species : str, default=None
Source species, must be either 'mmusculus' or 'hsapiens' if not None
dst_species : str, default=None
Destination species, must be either 'mmusculus' or 'hsapiens' if not None
to_intersect : List[str], default None.
Gene list to intersect with the input .gs file.
Returns
-------
dict_gs : dict
Dictionary of gene sets: {
trait1: (gene_list, gene_weight_list),
trait2: (gene_list, gene_weight_list),
...
}
"""
assert (src_species is None) == (
dst_species is None
), "src_species and dst_species must be both None or not None"
# Load homolog map dict_map; only needed when src_species and dst_species
# are not None and different.
if ((src_species is not None) & (dst_species is not None)) and (
src_species != dst_species
):
dict_map = load_homolog_mapping(src_species, dst_species) # type: ignore
else:
dict_map = None # type: ignore
# Load gene set file
dict_gs = {}
df_gs = pd.read_csv(gs_path, sep="\t")
for i, (trait, gs) in df_gs.iterrows():
gs_info = [g.split(":") for g in gs.split(",")]
if np.all([len(g) == 1 for g in gs_info]):
# if all genes are weighted uniformly
dict_weights = {g[0]: 1.0 for g in gs_info}
elif np.all([len(g) == 2 for g in gs_info]):
# if all genes are weighted by their weights
dict_weights = {g[0]: float(g[1]) for g in gs_info}
else:
raise ValueError(f"gene set {trait} contains genes with invalid format")
# Convert species if needed
# convert gene list to homologs, if gene can not be mapped, remove it
# in both gene list and gene weight
if dict_map is not None:
dict_weights = {
dict_map[g]: w for g, w in dict_weights.items() if g in dict_map
}
# Intersect with other gene sets
if to_intersect is not None:
to_intersect = set(to_intersect)
dict_weights = {g: w for g, w in dict_weights.items() if g in to_intersect}
gene_list = list(dict_weights.keys())
dict_gs[trait] = (
gene_list,
[dict_weights[g] for g in gene_list],
)
return dict_gs | 0e13c355b1a3bd7e88785844262a01c6963ef0ee | 13,243 |
import numpy as np
from .points import remove_close
def sample_surface_even(mesh, count, radius=None):
"""
Sample the surface of a mesh, returning samples which are
VERY approximately evenly spaced.
This is accomplished by sampling and then rejecting pairs
that are too close together.
Parameters
---------
mesh : trimesh.Trimesh
Geometry to sample the surface of
count : int
Number of points to return
radius : None or float
Removes samples below this radius
Returns
---------
samples : (count, 3) float
Points in space on the surface of mesh
face_index : (count,) int
Indices of faces for each sampled point
"""
# guess radius from area
if radius is None:
radius = np.sqrt(mesh.area / (3 * count))
# get points on the surface
points, index = sample_surface(mesh, count * 3)
# remove the points closer than radius
points, mask = remove_close(points, radius)
# we got all the samples we expect
if len(points) >= count:
return points[:count], index[mask][:count]
# warn if we didn't get all the samples we expect
util.log.warning('only got {}/{} samples!'.format(
len(points), count))
return points, index[mask] | 9dd9c4aa27f4811511d81ef0c8dabe3097026061 | 13,244 |
def allocate_probabilities(results, num_substations, probabilities):
"""
Allocate cumulative probabilities.
Parameters
----------
results : list of dicts
All iterations generated in the simulation function.
num_substations : list
The number of electricity substation nodes we wish to select for each scenario.
probabilities : list
Contains the cumulative probabilities we wish to use.
Returns
-------
output : list of dicts
Contains all generated results.
"""
output = []
for nodes in num_substations:
ranked_data = add_cp(results, nodes, probabilities)
for probability in probabilities:
scenario = min(
ranked_data,
key=lambda x: abs(float(x["cum_probability"]) - probability)
)
output.append(scenario)
return output | 5692517aa55776c94c7f4027f175eab985000fe1 | 13,245 |
def import_one_record_sv01(r, m):
"""Import one ODK Site Visit 0.1 record into WAStD.
Arguments
r The record as dict, e.g.
{
"instanceID": "uuid:cc7224d7-f40f-4368-a937-1eb655e0203a",
"observation_start_time": "2017-03-08T07:10:43.378Z",
"reporter": "florianm",
"photo_start": {
"filename": "1488957113670.jpg",
"type": "image/jpeg",
"url": "https://..."
},
"transect": "-31.9966142 115.88456594 0.0 0.0;",
"photo_finish": {
"filename": "1488957172832.jpg",
"type": "image/jpeg",
"url": "https://..."
},
"comments": null,
"observation_end_time": "2017-03-08T07:13:23.317Z"
}
m The mapping of ODK to WAStD choices
All existing records will be updated.
Make sure to skip existing records which should be retained unchanged.
Creates a Survey, e.g.
{
'started_on': datetime.datetime(2017, 1, 31, 16, 0, tzinfo=<UTC>),
'finished_on': datetime.datetime(2017, 2, 4, 16, 0, tzinfo=<UTC>),
'site_id': 17,
'source': 'direct',
'source_id': None,
'transect': None,
'comments': '',
}
"""
src_id = r["instanceID"]
new_data = dict(
source="odk",
source_id=src_id,
site_id=17, # TODO: reconstruct site on Survey if not given
transect=read_odk_linestring(r["transect"]),
started_on=parse_datetime(r["observation_start_time"]),
finished_on=parse_datetime(r["observation_end_time"]),
# m["users"][r["reporter"]],
comments=r["comments"]
)
if Survey.objects.filter(source_id=src_id).exists():
logger.debug("Updating unchanged existing record {0}...".format(src_id))
Survey.objects.filter(source_id=src_id).update(**new_data)
e = Survey.objects.get(source_id=src_id)
else:
logger.debug("Creating new record {0}...".format(src_id))
e = Survey.objects.create(**new_data)
e.save()
# MediaAttachments
handle_media_attachment(e, r["photo_start"], title="Site conditions at start of suvey")
handle_media_attachment(e, r["photo_finish"], title="Site conditions at end of suvey")
logger.info(" Saved {0}\n".format(e))
e.save()
return e | ac6cbd8c743241000adbc3395da48b926f71ee78 | 13,246 |
def delete_page_groups(request_ctx, group_id, url, **request_kwargs):
"""
Delete a wiki page
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param group_id: (required) ID
:type group_id: string
:param url: (required) ID
:type url: string
:return: Delete page
:rtype: requests.Response (with Page data)
"""
path = '/v1/groups/{group_id}/pages/{url}'
url = request_ctx.base_api_url + path.format(group_id=group_id, url=url)
response = client.delete(request_ctx, url, **request_kwargs)
return response | d9802d66e87eb0ef9e9fe5bee98686caade3e79d | 13,247 |
import os
def envset(name):
"""Return True if the given environment variable is set
An environment variable is considered set if it is assigned to a value
other than 'no', 'n', 'false', 'off', '0', or '0.0' (case insensitive)
"""
return os.environ.get(name, 'no').lower() not in ['no', 'n',
'false', 'off',
'0', '0.0'] | cb501aa5e106e342d7c8e02bc46f5f0a2621dc00 | 13,248 |
def solver(f, p_e, mesh, degree=1):
"""
Solving the Darcy flow equation on a unit square media with pressure boundary conditions.
"""
# Creating mesh and defining function space
V = FunctionSpace(mesh, 'P', degree)
# Defining Dirichlet boundary
p_L = Constant(1.0)
def boundary_L(x, on_boundary):
return on_boundary and near(x[0], 0)
bc_L = DirichletBC(V, p_L, boundary_L)
p_R = Constant(0.0)
def boundary_R(x, on_boundary):
return on_boundary and near(x[0], 1)
bc_R = DirichletBC(V, p_R, boundary_R)
bcs = [bc_L, bc_R]
# If p = p_e on the boundary, then use:-
#def boundary(x, on_boundary):
#return on_boundary
#bc = DirichletBC(V, p_e, boundary)
# Defining variational problem
p = TrialFunction(V)
v = TestFunction(V)
d = 2
I = Identity(d)
M = Expression('fmax(0.10, exp(-pow(10.0*x[1]-1.0*sin(10.0*x[0])-5.0, 2)))', degree=2, domain=mesh)
K = M*I
a = dot(K*grad(p), grad(v))*dx
L = inner(f, v)*dx
# Computing Numerical Pressure
p = Function(V)
solve(a == L, p, bcs)
return p | 0630b1199f064044976c24d825332aa2d879dab2 | 13,249 |
import numpy as np
def set_stretchmatrix(coefX=1.0, coefY=1.0):
    """Stretching matrix
    Args:
        coefX, coefY: coefficients (float) for the matrix
            [coefX 0
             0 coefY]
    Returns:
        stretching_matrix: matrix
    """
return np.array([[coefX, 0],[0, coefY]]) | cd11bf0351a205e52b1f99893fe43709636978d3 | 13,250 |
def set_bit(v, index, x):
"""Set the index:th bit of v to 1 if x is truthy, else to 0, and return the new value."""
mask = 1 << index # Compute mask, an integer with just bit 'index' set.
v &= ~mask # Clear the bit indicated by the mask (if x is False)
if x:
v |= mask # If x was True, set the bit indicated by the mask.
return v | 627744c06709eecec18f0c5956f1af4c57a57b8a | 13,251 |
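A short usage sketch for the row above, assuming set_bit is in scope.
v = 0b1010
print(bin(set_bit(v, 0, True)))   # 0b1011 -- bit 0 switched on
print(bin(set_bit(v, 3, False)))  # 0b10   -- bit 3 cleared
print(bin(set_bit(v, 1, True)))   # 0b1010 -- bit 1 was already set, value unchanged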
def test_credentials() -> (str, str):
"""
Read ~/.synapseConfig and retrieve test username and password
:return: endpoint, username and api_key
"""
config = _get_config()
return config.get(DEFAULT_CONFIG_AUTH_SECTION, DEFAULT_CONFIG_USERNAME_OPT),\
config.get(DEFAULT_CONFIG_AUTH_SECTION, DEFAULT_CONFIG_PASSWORD_OPT) | 2ceb441b338b5ea8b1154c19406af9b1e21ed85e | 13,252 |
import re
def BCA_formula_from_str(BCA_str):
"""
Get chemical formula string from BCA string
Args:
BCA_str: BCA ratio string (e.g. 'B3C1A1')
"""
if len(BCA_str)==6 and BCA_str[:3]=='BCA':
# format: BCAxyz. suitable for single-digit integer x,y,z
funits = BCA_str[-3:]
else:
# format: BxCyAz. suitable for multi-digit or non-integer x,y,z
funits = re.split('[BCA]',BCA_str)
funits = [u for u in funits if len(u) > 0]
components = ['BaO','CaO','Al2O3']
formula = ''.join([f'({c}){n}' for c,n in zip(components, funits)])
return formula | 36375e62d70995628e253ba68ba8b777eb88d728 | 13,253 |
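A usage sketch for the row above, assuming BCA_formula_from_str is in scope; both accepted input formats plus a non-integer ratio are shown.
print(BCA_formula_from_str('BCA311'))    # (BaO)3(CaO)1(Al2O3)1 -- compact BCAxyz form
print(BCA_formula_from_str('B3C1A1'))    # (BaO)3(CaO)1(Al2O3)1 -- BxCyAz form
print(BCA_formula_from_str('B2.5C1A1'))  # (BaO)2.5(CaO)1(Al2O3)1 -- non-integer ratios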
import numpy
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
def get_strongly_connected_components(graph):
"""
Get strongly connected components for a directed graph
The returned list of components is in reverse topological order, i.e.,
such that the nodes in the first component have no dependencies on
other components.
"""
nodes = list(graph.keys())
node_index_by_node = {node: index for index, node in enumerate(nodes)}
row_indexes = []
col_indexes = []
for node, targets in graph.items():
row_indexes += [node_index_by_node[node]] * len(targets)
col_indexes += [node_index_by_node[target] for target in targets]
data = numpy.ones((len(row_indexes)), dtype=int)
n_nodes = len(nodes)
csgraph = csr_matrix((data, (row_indexes, col_indexes)), shape=(n_nodes, n_nodes))
n_components, labels = connected_components(csgraph, directed=True, connection='strong')
sccs = [[] for i in range(n_components)]
for index, label in enumerate(labels):
sccs[label] += [nodes[index]]
return [frozenset(scc) for scc in sccs] | 42783756d8fdada032e2d0ca8a306f10d4977e16 | 13,254 |
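A small usage sketch for the row above, assuming get_strongly_connected_components and its numpy/scipy imports are in scope; the adjacency dict is made up for illustration.
graph = {
    'a': ['b'],  # a -> b
    'b': ['a'],  # b -> a, so {a, b} form one strongly connected component
    'c': ['a'],  # c depends on the {a, b} component
}
print(get_strongly_connected_components(graph))
# Per the docstring above, the component with no dependencies on other
# components comes first: [frozenset({'a', 'b'}), frozenset({'c'})]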
def dot(a, b):
"""
Computes a @ b, for a, b of the same rank (both 2 or both 3).
If the rank is 2, then the innermost dimension of `a` must match the
outermost dimension of `b`.
If the rank is 3, the first dimension of `a` and `b` must be equal and the
function computes a batch matmul.
Supports both dense and sparse multiplication (including sparse-sparse).
:param a: Tensor or SparseTensor with rank 2 or 3.
:param b: Tensor or SparseTensor with same rank as b.
:return: Tensor or SparseTensor with rank 2 or 3.
"""
a_ndim = K.ndim(a)
b_ndim = K.ndim(b)
assert a_ndim == b_ndim, "Expected equal ranks, got {} and {}" "".format(
a_ndim, b_ndim
)
a_is_sparse = K.is_sparse(a)
b_is_sparse = K.is_sparse(b)
# Handle cases: rank 2 sparse-dense, rank 2 dense-sparse
# In these cases we can use the faster sparse-dense matmul of tf.sparse
if a_ndim == 2:
if a_is_sparse and not b_is_sparse:
return tf.sparse.sparse_dense_matmul(a, b)
if not a_is_sparse and b_is_sparse:
return ops.transpose(
tf.sparse.sparse_dense_matmul(ops.transpose(b), ops.transpose(a))
)
# Handle cases: rank 2 sparse-sparse, rank 3 sparse-dense,
# rank 3 dense-sparse, rank 3 sparse-sparse
# In these cases we can use the tfsp.CSRSparseMatrix implementation (slower,
# but saves memory)
if a_is_sparse:
a = tfsp.CSRSparseMatrix(a)
if b_is_sparse:
b = tfsp.CSRSparseMatrix(b)
if a_is_sparse or b_is_sparse:
out = tfsp.matmul(a, b)
if hasattr(out, "to_sparse_tensor"):
return out.to_sparse_tensor()
else:
return out
# Handle case: rank 2 dense-dense, rank 3 dense-dense
# Here we use the standard dense operation
return tf.matmul(a, b) | 789c9d045d82eb048ff5319d5a7ae99ffb02376d | 13,255 |
import numpy as np
def unreshuffle_2d(x, i0, shape):
"""Undo the reshuffle_2d operation."""
x_flat = unreshuffle_1d(x, i0)
x_rev = np.reshape(x_flat, shape)
x_rev[1::2, :] = x_rev[1::2, ::-1] # reverse all odd rows
return x_rev | 72cf59b9e547cf2eb516fca33e9eea7d01c1702b | 13,256 |
def findNodeJustBefore(target, nodes):
"""
Find the node in C{nodes} which appeared immediately before C{target} in
the input document.
@type target: L{twisted.web.microdom.Element}
@type nodes: C{list} of L{twisted.web.microdom.Element}
@return: An element from C{nodes}
"""
result = None
for node in nodes:
if comparePosition(target, node) < 0:
return result
result = node
return result | 85d435a2c10dbaabb544c81d440e2110a6083dd7 | 13,257 |
def _format_line(submission, position, rank_change, total_hours):
"""
Formats info about a single post on the front page for logging/messaging. A single post will look like this:
Rank Change Duration Score Flair Id User Slug
13. +1 10h 188 [Episode](gkvlja) <AutoLovepon> <arte_episode_7_discussion>
"""
line = "{:3}".format(f"{position}.")
if rank_change is None:
line += " (new) "
elif rank_change != 0:
line += " {:7}".format(f"{rank_change:+d} {total_hours}h")
else:
line += " {:7}".format(f"-- {total_hours}h")
line += f" {submission.score:>5}"
line += " {:>24}".format(f"[{submission.link_flair_text}]({submission.id})")
line += f" <{submission.author.name}>"
line += f" <{reddit_utils.slug(submission)}>"
return line | 70840d0e57194b43fbaf0352ebecdfefa74bd4d7 | 13,258 |
from typing import List
from typing import Dict
from typing import Any
from typing import Tuple
def run_erasure( # pylint: disable = too-many-arguments
privacy_request: PrivacyRequest,
policy: Policy,
graph: DatasetGraph,
connection_configs: List[ConnectionConfig],
identity: Dict[str, Any],
access_request_data: Dict[str, List[Row]],
) -> Dict[str, int]:
"""Run an erasure request"""
traversal: Traversal = Traversal(graph, identity)
with TaskResources(privacy_request, policy, connection_configs) as resources:
def collect_tasks_fn(
tn: TraversalNode, data: Dict[CollectionAddress, GraphTask]
) -> None:
"""Run the traversal, as an action creating a GraphTask for each traversal_node."""
if not tn.is_root_node():
data[tn.address] = GraphTask(tn, resources)
env: Dict[CollectionAddress, Any] = {}
traversal.traverse(env, collect_tasks_fn)
def termination_fn(*dependent_values: int) -> Tuple[int, ...]:
"""The dependent_values here is an int output from each task feeding in, where
each task reports the output of 'task.rtf(access_request_data)', which is the number of
records updated.
The termination function just returns this tuple of ints."""
return dependent_values
dsk: Dict[CollectionAddress, Any] = {
k: (t.erasure_request, access_request_data[str(k)]) for k, t in env.items()
}
# terminator function waits for all keys
dsk[TERMINATOR_ADDRESS] = (termination_fn, *env.keys())
v = dask.delayed(get(dsk, TERMINATOR_ADDRESS))
update_cts: Tuple[int, ...] = v.compute()
# we combine the output of the termination function with the input keys to provide
# a map of {collection_name: records_updated}:
erasure_update_map: Dict[str, int] = dict(
zip([str(x) for x in env], update_cts)
)
return erasure_update_map | 2b9579ca1c4960da46ee7bb1388ab667dc808f40 | 13,259 |
from .build_helper import get_script_module
import sys
import os
def write_module_scripts(folder, platform=sys.platform, blog_list=None,
default_engine_paths=None, command=None):
"""
Writes a couple of scripts which allow a user to be faster on some tasks
or to easily get information about the module.
@param folder where to write the script
@param platform platform
@param blog_list blog list to follow, should be attribute ``__blog__`` of the module
@param command None to generate scripts for all commands or a value in *[blog, doc]*.
@param default_engine_paths default engines (or python distributions)
@return list of written scripts
The function produces the following files:
* *auto_rss_list.xml*: list of rss stream to follow
* *auto_rss_database.db3*: stores blog posts
* *auto_rss_server.py*: runs a server which updates the scripts and runs a server. It also open the default browser.
* *auto_rss_server.(bat|sh)*: run *auto_run_server.py*, the file on Linux might be missing if there is an equivalent python script
.. faqref::
:title: How to generate auto_rss_server.py?
The following code generates the script *auto_rss_local.py*
which runs a local server to read blog posts included
in the documentation (it uses module
`pyrsslocal <http://www.xavierdupre.fr/app/pyrsslocal/helpsphinx/index.html>`_)::
from pyquickhelper.pycode import write_module_scripts, __blog__
write_module_scripts(".", blog_list=__blog__, command="blog")
"""
# delayed import
default_set = {"blog", "doc"}
if command is not None:
if command not in default_set:
raise ValueError( # pragma: no cover
"command {0} is not available in {1}".format(command, default_set))
commands = {command}
else:
commands = default_set
res = []
for c in commands:
sc = get_script_module(
c, platform=sys.platform, blog_list=blog_list, default_engine_paths=default_engine_paths)
if sc is None:
continue # pragma: no cover
tobin = os.path.join(folder, "bin")
if not os.path.exists(tobin):
os.mkdir(tobin)
for item in sc:
if isinstance(item, tuple):
name = os.path.join(folder, "bin", item[0])
with open(name, "w", encoding="utf8") as f:
f.write(item[1])
res.append(name)
else: # pragma: no cover
name = os.path.join(
folder, "bin", "auto_run_%s.%s" % (c, get_script_extension()))
with open(name, "w") as f:
f.write(item)
res.append(name)
return res | fe8921b4440b78f6788fe4a41874da1f9fb8228d | 13,260 |
def RichTextBuffer_FindHandlerByName(*args, **kwargs):
"""RichTextBuffer_FindHandlerByName(String name) -> RichTextFileHandler"""
return _richtext.RichTextBuffer_FindHandlerByName(*args, **kwargs) | 1122822db4885b6d745f4e893522fb01b988ee3f | 13,261 |
def likelihood(tec, phase, tec_conversion, lik_sigma, K = 2):
"""
Get the likelihood of the tec given phase data and lik_var variance.
tec: tensor B, 1
phase: tensor B, Nf
tec_conversion: tensor Nf
lik_sigma: tensor B, 1 (Nf)
Returns:
    log_prob: tensor (B, 1) -- note the function actually returns the *negative* log probability
"""
mu = wrap(tec*tec_conversion[None,:])# B, Nf
phase = wrap(phase)
#K, B, Nf
d = tf.stack([tf.distributions.Normal(mu + tf.convert_to_tensor(k*2*np.pi,float_type),
lik_sigma).log_prob(phase) for k in range(-K,K+1,1)], axis=0)
#B, Nf -> B
log_lik = tf.reduce_sum(tf.reduce_logsumexp(d, axis=0), axis=1)
# B, 1
# tec_mu = tf.gather(tec, neighbour)
# tec_std = 0.001 * tf.exp(-0.25*neighbour_dist**2)
# tec_prior = tf.distributions.Normal(tec_mu, tec_std).log_prob(tec)
# sigma_priors = log_normal_solve(0.2,0.1)
# #B, 1
# sigma_prior = tf.distributions.Normal(
# tf.convert_to_tensor(sigma_priors[0],dtype=float_type),
# tf.convert_to_tensor(sigma_priors[1],dtype=float_type)).log_prob(tf.log(lik_sigma)) - tf.log(lik_sigma)
#B, 1
log_prob = log_lik[:,None]# + tec_prior # + sigma_prior
return -log_prob | c5f566484ee8f8cbc48e5302365f0e06c81f49e3 | 13,262 |
import numpy as np  # unit_vector() (v / ||v||) is assumed to be defined elsewhere in this module
def angle_between(v1, v2):
"""Returns the angle in radians between vectors 'v1' and 'v2'::
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
"""
# https://stackoverflow.com/a/13849249/782170
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
    cos_theta = np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)
    # arccos of the clipped dot product already spans [0, pi]; the previous
    # branch that subtracted pi/2 for negative cosines contradicted the
    # doctest for anti-parallel vectors.
    return np.arccos(cos_theta) | fc2246c9d3fb55a0c2f692c1533e821a187599b8 | 13,263 |
from aiida.engine import ExitCode
from aiida.orm import StructureData
from ase.lattice.cubic import FaceCenteredCubic
from ase.lattice.cubic import BodyCenteredCubic
def create_substrate_bulk(wf_dict_node):
"""
Calcfunction to create a bulk structure of a substrate.
:params wf_dict: AiiDA dict node with at least keys lattice, host_symbol and latticeconstant
(If they are not there, raises KeyError)
Lattice key supports only fcc and bcc
raises ExitCode 380, ERROR_NOT_SUPPORTED_LATTICE
"""
wf_dict = wf_dict_node.get_dict()
lattice = wf_dict['lattice']
if lattice == 'fcc':
structure_factory = FaceCenteredCubic
elif lattice == 'bcc':
structure_factory = BodyCenteredCubic
else:
return ExitCode(380, 'ERROR_NOT_SUPPORTED_LATTICE', message='Specified substrate has to be bcc or fcc.')
directions = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
host_symbol = str(wf_dict['host_symbol'])
latticeconstant = float(wf_dict['latticeconstant'])
size = (1, 1, 1)
structure = structure_factory(directions=directions,
symbol=host_symbol,
pbc=(1, 1, 1),
latticeconstant=latticeconstant,
size=size)
return StructureData(ase=structure) | cb232093c3f6866bd14d2b19115ef988f279bf2f | 13,264 |
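# Hedged usage sketch for create_substrate_bulk() above: the wf_dict node is an
# AiiDA Dict with the three keys required by the docstring; the element and
# lattice constant below are purely illustrative, and a configured AiiDA
# profile is assumed.
from aiida import load_profile
from aiida.orm import Dict
load_profile()
wf_parameters = Dict(dict={
    'lattice': 'fcc',          # only 'fcc' and 'bcc' are supported
    'host_symbol': 'Pt',
    'latticeconstant': 3.92,   # Angstrom, passed straight to ASE
})
substrate_structure = create_substrate_bulk(wf_parameters)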
def run_Net_on_multiple(patchCreator, input_to_cnn_depth=1, cnn = None,
str_data_selection="all", save_file_prefix="",
apply_cc_filtering = False, output_filetype = 'h5', save_prob_map = False):
""" run runNetOnSlice() on neighbouring blocks of data.
if opt_cnn is not none, it should point to a CNN / Net that will be used.
if patchCreator contains a list of 3D data blocks (patchCreator.second_input_data) then it will be used as second input to cnn.output()
"""
assert str_data_selection in ["all", "train", "test"]
MIN = 0 if str_data_selection in ["all", "train"] else patchCreator.training_set_size
MAX = patchCreator.training_set_size if str_data_selection =="train" else len(patchCreator.data)
second_input_data = None
DATA = patchCreator.data
timings=[]
# if hasattr(patchCreator,"second_input_data"):
# second_input_data = patchCreator.second_input_data[opt_list_index]
for opt_list_index in range(MIN, MAX):
print "-"*30
print "@",opt_list_index+1,"of max.",len(patchCreator.data)
        postfix = ("" if opt_list_index==None
                   else "_" + utilities.extract_filename(patchCreator.file_names[opt_list_index])[1] if isinstance(patchCreator.file_names[0], str)
                   else str(patchCreator.file_names[opt_list_index]) if not isinstance(patchCreator.file_names[opt_list_index], tuple)
                   else utilities.extract_filename(patchCreator.file_names[opt_list_index][0])[1])
if opt_list_index is not None:
is_training = "_train" if (opt_list_index < patchCreator.training_set_size) else "_test"
else:
is_training=""
this_save_name = save_file_prefix+"prediction"+postfix+"_"+is_training
t0 = time.clock()
sav = run_Net_on_Block(cnn, DATA[opt_list_index], patchCreator, bool_predicts_on_softmax=1,
second_input_data = second_input_data) #this one does all the work
t1 = time.clock()
timings.append(t1-t0)
if apply_cc_filtering:
sav = remove_small_conneceted_components(sav)
sav = 1 - remove_small_conneceted_components(1 - sav)
save_pred(sav, this_save_name, output_filetype, save_prob_map)
print 'timings (len',len(timings),')',np.mean(timings),'+-',np.std(timings)
return None | 9d4c9c2fa3248258299243e3d7585362f47776a2 | 13,265 |
from django.conf import settings
def user_to_janrain_capture_dict(user):
"""Translate user fields into corresponding Janrain fields"""
field_map = getattr(settings, 'JANRAIN', {}).get('field_map', None)
if not field_map:
field_map = {
'first_name': {'name': 'givenName'},
'last_name': {'name': 'familyName'},
'email': {'name': 'email'},
'username': {'name': 'displayName'},
}
result = {}
for field in user._meta.fields:
if field.name in field_map:
fm = field_map[field.name]
value = getattr(user, field.name)
func = fm.get('function', None)
if func:
value = func(value)
# Plurals are dot delimited
parts = fm['name'].split('.')
key = parts[0]
if len(parts) == 1:
result[key] = value
else:
result.setdefault(key, {})
result[key][parts[1]] = value
return result | 767b7003a282e481c1d1753f4c469fec42a5002a | 13,266 |
from typing import Callable
from typing import Optional
import numpy as np
from numpy.random import Generator, default_rng  # numpy's RNG (the docstring's "Numpy random number generator"), not typing.Generator
def weighted(generator: Callable, directed: bool = False, low: float = 0.0, high: float = 1.0,
rng: Optional[Generator] = None) -> Callable:
"""
Takes as input a graph generator and returns a new generator function that outputs weighted
graphs. If the generator is dense, the output will be the weighted adjacency matrix. If the
generator is sparse, the new function will return a tuple (adj_list, weights).
Parameters
----------
generator : Callable
A callable that generates graphs
directed: bool
Whether to generate weights for directed graphs
low : float, optional
Lower boundary of the sampling distribution interval,
i.e., interval in [low, high), by default 0.0
high : float, optional
Upper boundary of the sampling distribution interval,
i.e., interval in [low, high), by default 1.0
rng : Generator, optional
Numpy random number generator, by default None
Returns
-------
Callable
A callable that generates weighted graphs
Examples
--------
>> weighted(erdos_renyi)(num_nodes=100, prob=0.5)
"""
if rng is None:
rng = default_rng()
def weighted_generator(*args, **kwargs):
adj = generator(*args, **kwargs)
if adj.shape[0] == adj.shape[1]:
num_nodes = adj.shape[0]
weights = rng.uniform(low=low, high=high, size=(num_nodes, num_nodes))
if not directed:
weights = np.triu(weights)
weights = weights + weights.T
adj = adj.astype(float) * weights
return adj
weights = rng.uniform(low=low, high=high, size=(adj.shape[0], 1))
return adj, weights
return weighted_generator | d4c4d0b93784ca7bee22ecaffc3e8005315aa631 | 13,267 |
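# Hedged usage sketch for weighted() above: "dense_ring" is a made-up toy
# generator standing in for erdos_renyi etc.; it returns a dense adjacency
# matrix, so the wrapper multiplies it by a symmetric random weight matrix.
import numpy as np
def dense_ring(num_nodes: int) -> np.ndarray:
    adj = np.zeros((num_nodes, num_nodes))
    idx = np.arange(num_nodes)
    adj[idx, (idx + 1) % num_nodes] = 1
    adj[(idx + 1) % num_nodes, idx] = 1
    return adj
weighted_ring = weighted(dense_ring, low=0.5, high=2.0)
adj = weighted_ring(num_nodes=5)
print(adj.shape)  # (5, 5) symmetric matrix with weights only on the ring edges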
import argparse
import os
def arg_parse(dataset, view, num_shots=2, cv_number=5):
"""
arguments definition method
"""
parser = argparse.ArgumentParser(description='Graph Classification')
parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
parser.add_argument('--v', type=str, default=1)
parser.add_argument('--data', type=str, default='Sample_dataset', choices = [ f.path[5:] for f in os.scandir("data") if f.is_dir() ])
parser.add_argument('--dataset', type=str, default=dataset,
help='Dataset')
parser.add_argument('--view', type=int, default=view,
help = 'view index in the dataset')
parser.add_argument('--num_epochs', type=int, default=1, #50
help='Training Epochs')
parser.add_argument('--num_shots', type=int, default=num_shots, #100
help='number of shots')
parser.add_argument('--cv_number', type=int, default=cv_number,
help='number of validation folds.')
parser.add_argument('--NormalizeInputGraphs', default=False, action='store_true',
help='Normalize Input adjacency matrices of graphs')
parser.add_argument('--evaluation_method', type=str, default='model assessment',
help='evaluation method, possible values : model selection, model assessment')
parser.add_argument('--threshold', dest='threshold', default='mean',
help='threshold the graph adjacency matrix. Possible values: no_threshold, median, mean')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disables CUDA training.')
parser.add_argument('--num-classes', dest='num_classes', type=int, default=2,
help='Number of label classes')
parser.add_argument('--lr', type=float, default=0.001,
help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=8,
help='Number of hidden units.')
parser.add_argument('--nb_heads', type=int, default=8,
help='Number of head attentions.')
parser.add_argument('--dropout', type=float, default=0.8,
help='Dropout rate (1 - keep probability).')
parser.add_argument('--alpha', type=float, default=0.2,
help='Alpha for the leaky_relu.')
return parser.parse_args() | 4758fb939584f0433fb669fd03939d54c498f375 | 13,268 |
def generate_sampled_graph_and_labels(triplets, sample_size, split_size,
num_rels, adj_list, degrees,
negative_rate,tables_id, sampler="uniform"):
"""Get training graph and signals
First perform edge neighborhood sampling on graph, then perform negative
sampling to generate negative samples
"""
# perform edge neighbor sampling
if sampler == "uniform":
edges = sample_edge_uniform(adj_list, degrees, len(triplets), sample_size)
elif sampler == "neighbor":
edges = sample_edge_neighborhood(adj_list, degrees, len(triplets), sample_size,tables_id)
else:
raise ValueError("Sampler type must be either 'uniform' or 'neighbor'.")
# relabel nodes to have consecutive node ids
edges = triplets[edges]
src, rel, dst = edges.transpose()
# my_graph = nx.Graph()
# edges_to_draw = list(set(list(zip(dst, src, rel))))
# edges_to_draw = sorted(edges_to_draw)
# # my_graph.add_edges_from(edges_to_draw[:10])
#
# for item in edges_to_draw:
# my_graph.add_edge(item[1], item[0], weight=item[2]*10)
# pos = nx.spring_layout(my_graph)
# labels = nx.get_edge_attributes(my_graph, 'weight')
# plt.figure()
# nx.draw(my_graph, pos, edge_color='black', width=1, linewidths=1, arrows=True,
# node_size=100, node_color='red', alpha=0.9,
# labels={node: node for node in my_graph.nodes()})
# nx.draw_networkx_edge_labels(my_graph, pos, edge_labels=labels, font_color='red')
# plt.axis('off')
# plt.show()
uniq_v, edges = np.unique((src, dst), return_inverse=True)
src, dst = np.reshape(edges, (2, -1))
relabeled_edges = np.stack((src, rel, dst)).transpose()
# negative sampling
samples, labels = negative_sampling(relabeled_edges, len(uniq_v),
negative_rate)
#samples, labels = negative_relations(relabeled_edges, len(uniq_v),
# negative_rate)
# further split graph, only half of the edges will be used as graph
# structure, while the rest half is used as unseen positive samples
split_size = int(sample_size * split_size)
graph_split_ids = np.random.choice(np.arange(sample_size),
size=split_size, replace=False)
src = src[graph_split_ids]
dst = dst[graph_split_ids]
rel = rel[graph_split_ids]
# build DGL graph
print("# sampled nodes: {}".format(len(uniq_v)))
print("# sampled edges: {}".format(len(src) * 2))
#g, rel, norm,_ = build_graph_from_triplets_modified(len(uniq_v), num_rels,
# (src, rel, dst))
g, rel, norm=build_graph_directly(len(uniq_v), (src, rel, dst))
return g, uniq_v, rel, norm, samples, labels | 7386eada0e0aa70478c063aa4525c62cbc997b2e | 13,269 |
import optparse
def ParseArgs():
"""Parses command line options.
Returns:
An options object as from optparse.OptionsParser.parse_args()
"""
parser = optparse.OptionParser()
parser.add_option('--android-sdk', help='path to the Android SDK folder')
parser.add_option('--android-sdk-tools',
help='path to the Android SDK platform tools folder')
parser.add_option('--R-package', help='Java package for generated R.java')
parser.add_option('--R-dir', help='directory to hold generated R.java')
parser.add_option('--res-dir', help='directory containing resources')
parser.add_option('--crunched-res-dir',
help='directory to hold crunched resources')
(options, args) = parser.parse_args()
if args:
parser.error('No positional arguments should be given.')
# Check that required options have been provided.
required_options = ('android_sdk', 'android_sdk_tools', 'R_package',
'R_dir', 'res_dir', 'crunched_res_dir')
for option_name in required_options:
if getattr(options, option_name) is None:
parser.error('--%s is required' % option_name.replace('_', '-'))
return options | 492894d0cb4faf004f386ee0f4285180d0a6c37d | 13,270 |
def get_interface_from_model(obj: Base) -> str:
"""
Transform the passed model object into an dispatcher interface name.
For example, a :class:``Label`` model will result in a string with the value `labels` being
returned.
:param obj: the model object
:return: the interface string
"""
try:
return obj.__tablename__
except AttributeError:
raise TypeError("Not a transformable model: ", obj) | 2ee8d1ac86b0d8d6433ace8d58483dbf91af997b | 13,271 |
import codecs
def get_text(string, start, end, bom=True):
"""This method correctly accesses slices of strings using character
start/end offsets referring to UTF-16 encoded bytes. This allows
for using character offsets generated by Rosette (and other softwares)
that use UTF-16 native string representations under Pythons with UCS-4
support, such as Python 3.3+ (refer to https://www.python.org/dev/peps/pep-0393/).
The offsets are adjusted to account for a UTF-16 byte order mark (BOM)
(2 bytes) and also that each UTF-16 logical character consumes 2 bytes.
'character' in this context refers to logical characters for the purpose of
character offsets; an individual character can consume up to 4 bytes (32
bits for so-called 'wide' characters) and graphemes can consume even more.
"""
if not isinstance(string, str):
raise ValueError('expected string to be of type str')
if not any(((start is None), isinstance(start, int))):
raise ValueError('expected start to be of type int or NoneType')
if not any(((end is None), isinstance(end, int))):
raise ValueError('expected end to be of type int or NoneType')
if start is not None:
start *= 2
if bom:
start += 2
if end is not None:
end *= 2
if bom:
end += 2
utf_16, _ = codecs.utf_16_encode(string)
sliced, _ = codecs.utf_16_decode(utf_16[start:end])
return sliced | ffe3c74a248215a82b0e0a5b105f5e4c94c8c2a8 | 13,272 |
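# Hedged worked example for get_text() above: offsets count UTF-16 code units,
# so the emoji below (a surrogate pair) spans offsets 1-2 even though it is a
# single Python character.
sample = "a\U0001F600b"
print(get_text(sample, 1, 3))  # the emoji only
print(get_text(sample, 3, 4))  # 'b'
print(sample[1:3])             # plain slicing at the same offsets returns emoji + 'b'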
import math
def init_distance(graph: dict, s: str) -> dict:
"""
    Initialize the distance of every vertex other than the source ``s`` to
    positive infinity, so that later dictionary lookups cannot raise KeyError.
"""
distance = {s: 0}
for vertex in graph:
if vertex != s:
distance[vertex] = math.inf
return distance | dd8ceda3ca7435b5f02b7b47a363f017f796bc36 | 13,273 |
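# Hedged usage sketch for init_distance() above, e.g. as the first step of
# Dijkstra; the toy graph maps each vertex to its neighbour dict.
toy_graph = {"a": {"b": 1}, "b": {"c": 2}, "c": {}}
print(init_distance(toy_graph, "a"))  # {'a': 0, 'b': inf, 'c': inf}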
def read_cry_data(path):
"""
Read a cry file and extract the molecule's geometry.
The format should be as follows::
U_xx U_xy U_xz
U_yx U_yy U_yz
U_zx U_zy U_zz
energy (or comment, this is ignored for now)
ele0 x0 y0 z0
ele1 x1 y1 z1
...
elen xn yn zn
Where the U matrix is made of the unit cell basis vectors as column
vectors.
Parameters
----------
path : str
A path to a file to read
Returns
-------
val : LazyValues
An object storing all the data
"""
unit = []
coords = []
elements = []
with open(path, 'r') as f:
for line in f:
parts = line.strip().split()
if len(parts) == 3:
unit.append([float(x) for x in parts])
if len(parts) == 4:
elements.append(parts[0])
coords.append([float(x) for x in parts[1:]])
return LazyValues(elements=elements, coords=coords, unit_cell=unit) | 3a7a88e9d70c5f7499ad219602062ad2d852139b | 13,274 |
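# Hedged illustration of the .cry format parsed by read_cry_data() above:
# three unit-cell rows, one energy/comment line, then "element x y z" rows.
cry_text = (
    "2.0 0.0 0.0\n"
    "0.0 2.0 0.0\n"
    "0.0 0.0 2.0\n"
    "-1.0\n"
    "H 0.0 0.0 0.0\n"
    "H 0.7 0.0 0.0\n"
)
with open("example.cry", "w") as f:
    f.write(cry_text)
# values = read_cry_data("example.cry")  # needs the package's LazyValues class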
import torch
def get_camera_wireframe(scale: float = 0.3):
"""
Returns a wireframe of a 3D line-plot of a camera symbol.
"""
a = 0.5 * torch.tensor([-2, 1.5, 4])
b = 0.5 * torch.tensor([2, 1.5, 4])
c = 0.5 * torch.tensor([-2, -1.5, 4])
d = 0.5 * torch.tensor([2, -1.5, 4])
C = torch.zeros(3)
F = torch.tensor([0, 0, 3])
camera_points = [a, b, d, c, a, C, b, d, C, c, C, F]
lines = torch.stack([x.float() for x in camera_points]) * scale
return lines | 65bb8fa078f2f6f3edb38ac86da0603073fd413f | 13,275 |
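# Quick check of get_camera_wireframe() above: it traces the camera symbol
# with 12 scaled 3D points.
print(get_camera_wireframe().shape)  # torch.Size([12, 3])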
import struct as st
import numpy as np
def read_input(file):
    """
    Args:
        file (str): path to an IDX-format binary file.
    Returns:
        numpy.ndarray: dataset array, reshaped to the dimensions given in the header.
    """
with open(file, 'rb') as file:
z, d_type, d = st.unpack('>HBB', file.read(4))
shape = tuple(st.unpack('>I', file.read(4))[0] for d in range(d))
return np.frombuffer(file.read(), dtype=np.uint8).reshape(shape) | 91b10314a326380680898efdb8a7d15aa7a84f24 | 13,276 |
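# Hedged worked example for read_input() above: build a tiny IDX file
# (2 dimensions, shape 2x3, unsigned-byte payload) and read it back.
import struct as st
with open("tiny.idx", "wb") as f:
    f.write(st.pack(">HBB", 0, 8, 2))  # zero field, dtype 0x08 (ubyte), 2 dims
    f.write(st.pack(">II", 2, 3))      # dimension sizes
    f.write(bytes(range(6)))           # 6 data bytes
print(read_input("tiny.idx"))          # [[0 1 2] [3 4 5]]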
from typing import List
def maximum_segment_sum(input_list: List):
"""
Return the maximum sum of the segments of a list
Examples::
>>> from pyske.core import PList, SList
>>> maximum_segment_sum(SList([-5 , 2 , 6 , -4 , 5 , -6 , -4 , 3]))
9
>>> maximum_segment_sum(PList.from_seq([-33 , 22 , 11 , -44]))
33
>>> maximum_segment_sum(PList.from_seq([-33 , 22 , 0, 1, -3, 11 , -44, 30, -5, -13, 12]))
31
:param input_list: a PySke list of numbers
:return: a number, the maximum sum of the segments of a list
"""
best_sum, _ = input_list.map(int_to_tuple).reduce(max_and_sum, (0, 0))
return best_sum | a42b41f3a3b020e0bcba80b557c628b2a0805caf | 13,277 |
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
Notes
-----
Called by pysat. Not intended for direct use by user.
"""
if data_path is not None:
if tag == '':
# files are by month, going to add date to monthly filename for
# each day of the month. The load routine will load a month of
# data and use the appended date to select out appropriate data.
if format_str is None:
format_str = 'kp{year:2d}{month:02d}.tab'
out = pysat.Files.from_os(data_path=data_path,
format_str=format_str,
two_digit_year_break=94)
if not out.empty:
out.ix[out.index[-1]+pds.DateOffset(months=1)-
pds.DateOffset(days=1)] = out.iloc[-1]
out = out.asfreq('D', 'pad')
out = out + '_' + out.index.strftime('%Y-%m-%d')
return out
elif tag == 'forecast':
format_str = 'kp_forecast_{year:04d}-{month:02d}-{day:02d}.txt'
files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
# pad list of files data to include most recent file under tomorrow
if not files.empty:
files.ix[files.index[-1]+pds.DateOffset(days=1)] = files.values[-1]
files.ix[files.index[-1]+pds.DateOffset(days=1)] = files.values[-1]
return files
elif tag == 'recent':
format_str = 'kp_recent_{year:04d}-{month:02d}-{day:02d}.txt'
files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
# pad list of files data to include most recent file under tomorrow
if not files.empty:
files.ix[files.index[-1]+pds.DateOffset(days=1)] = files.values[-1]
files.ix[files.index[-1]+pds.DateOffset(days=1)] = files.values[-1]
return files
else:
raise ValueError('Unrecognized tag name for Space Weather Index Kp')
else:
raise ValueError ('A data_path must be passed to the loading routine ' +
'for Kp') | a302202b7a12534186cfabe66a788df5c29b3266 | 13,278 |
def has_bookmark(uri):
"""
Returns true if the asset with given URI has been bookmarked by the
currently logged in user. Returns false if there is no currently logged
in user.
"""
if is_logged_in():
mongo.db.bookmarks.ensure_index('username')
mongo.db.bookmarks.ensure_index('asset.uri')
return mongo.db.bookmarks.find_one({
'username': current_user.username,
'asset.uri': uri
}) is not None
return False | 8571c8b50c28a0e8829a143b1d33fd549cfad213 | 13,279 |
import six
import numpy as np
from tqdm import tqdm
def evaluate(f, K, dataiter, num_steps):
  """Evaluates online few-shot episodes.
  Args:
    f: Callable returning features for a support batch, called as f(batch['x_s'], batch['y_s']).
    K: Unused by this routine.
    dataiter: Dataset iterator.
    num_steps: Number of episodes; -1 runs through the full iterator.
  """
if num_steps == -1:
it = six.moves.xrange(len(dataiter))
else:
it = six.moves.xrange(num_steps)
it = tqdm(it, ncols=0)
results = []
for i, batch in zip(it, dataiter):
# Get features.
h = f(batch['x_s'], batch['y_s'])
if type(h) is tuple:
h, (beta, gamma, beta2, gamma2, count) = h
print('beta/count', np.stack([beta, count], axis=-1))
batch['beta'] = beta.numpy()
batch['gamma'] = gamma.numpy()
batch['beta2'] = beta2.numpy()
batch['gamma2'] = gamma2.numpy()
batch['count'] = count.numpy()
batch['h'] = h.numpy()
results.append(batch)
return results | a6feebd96907f01789dd74bfb22e5a5306010bf9 | 13,280 |
def find_period_of_function(eq,slopelist,nroots):
"""This function finds the Period of the function.
It then makes a list of x values that are that period apart.
Example Input: find_period_of_function(eq1,[0.947969,1.278602])
"""
global tan
s1 = slopelist[0]
s2 = slopelist[1]
if tan == 1:
T = 3.14159265359
else:
T = s2-s1
periodlist = []
for i in range(nroots):
periodlist.append(s1+T*i)
return periodlist | 491d853aa99a31348a3acce1c29cc508e7ab3b69 | 13,281 |
def merge_date_tags(path, k):
"""called when encountering only tags in an element ( no text, nor mixed tag and text)
Arguments:
path {list} -- path of the element containing the tags
    k {dict} -- the element containing the tags (under its '#alldata' key)
    Returns:
    str or dict -- the formatted date string when the element is a date, otherwise the element is returned unchanged
    """
l=k['#alldata']
#2015/01/01 12:10:30
# if "PubMedPubDate" in path[-1]:
if "date" in path[-1].lower():
month=None
year=None
day=None
hour=None
minute=None
r=""
# it should always be a dict with one key, and a subdict as value, containing an "#alldata" key
# {'month': {'#alldata': ['09']}}
for i in l:
# month
k = next(iter(i))
# ['09']
ad = i[k]['#alldata']
if k == "Year" and len(ad) == 1 and isinstance (ad[0], str):
year=ad[0]
elif k == "Month" and len(ad) == 1 and isinstance (ad[0], str):
month=ad[0]
elif k == "Day" and len(ad) == 1 and isinstance (ad[0], str):
day=ad[0]
elif k == "Hour" and len(ad) == 1 and isinstance (ad[0], str):
hour=ad[0]
if len(hour) == 1:
hour = "0"+hour
elif k == "Minute" and len(ad) == 1 and isinstance (ad[0], str):
minute=ad[0]
if len(minute) == 1:
minute = "0"+minute
if year is not None:
r=r+year
if month is not None:
r=r+"/"+month
if day is not None:
r=r+"/"+day
if hour is not None:
r=r+ " "+hour
if minute is not None:
r=r+":"+minute
        # return r only if at least "year" is present
return r
return k | 2ae3bd0dada288b138ee450103c0b4412a841336 | 13,282 |
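# Hedged worked example for merge_date_tags() above, mirroring the '#alldata'
# structure shown in the docstring; the path just needs to end in a *date* tag.
element = {'#alldata': [
    {'Year': {'#alldata': ['2015']}},
    {'Month': {'#alldata': ['01']}},
    {'Day': {'#alldata': ['01']}},
    {'Hour': {'#alldata': ['12']}},
    {'Minute': {'#alldata': ['10']}},
]}
print(merge_date_tags(['PubmedData', 'History', 'PubMedPubDate'], element))
# -> '2015/01/01 12:10'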
def ocp_play():
"""Decorator for adding a method as an common play search handler."""
def real_decorator(func):
# Store the flag inside the function
# This will be used later to identify the method
if not hasattr(func, 'is_ocp_playback_handler'):
func.is_ocp_playback_handler = True
return func
return real_decorator | 9e96fe81b331820bf7485501e458cbb4efba4328 | 13,283 |
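# Hedged usage sketch for ocp_play() above: mark a skill method as a common
# play handler (the class and method names are illustrative).
class MyMediaSkill:
    @ocp_play()
    def play_media(self, message):
        pass
print(MyMediaSkill.play_media.is_ocp_playback_handler)  # True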
import matplotlib.pyplot as plt
def _scatter(x_arr, y_arr, attributes, xlabel=None, xlim=None, xlog=False,
ylabel=None, ylim=None, ylog=False,
show=True, save=None):
"""Private plotting utility function."""
# initialise figure and axis settings
fig = plt.figure()
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
# plotting of a histogram
plt.scatter(x_arr, y_arr,
color=attributes['color'],
alpha=attributes['opacity'],
label=attributes['label'])
# ax.set_xticks(bins + 0.5)
# final axis setting
ax.set_xlim(xlim)
ax.set_xlabel(xlabel, color="black")
ax.set_xscale('log' if xlog==True else 'linear')
ax.set_ylim(ylim)
ax.set_ylabel(ylabel, color="black")
ax.set_yscale('log' if ylog==True else 'linear')
# add legend
if not attributes['label']==None:
legend = ax.legend(loc=0)
legend.get_frame().set_facecolor('white')
legend.get_frame().set_edgecolor('lightgrey')
# save/show figure
if save!=None:
plt.savefig(save, bbox_inches='tight')
if show:
        plt.show(block=False)  # pyplot.show() takes no figure argument; the current figure is shown
return fig, fig.axes | 8494f9398ea0e5d197a58ea341c626e03d47b028 | 13,284 |
def first_item(iterable, default=None):
"""
Returns the first item of given iterable.
Parameters
----------
iterable : iterable
Iterable
default : object
Default value if the iterable is empty.
Returns
-------
object
First iterable item.
"""
if not iterable:
return default
for item in iterable:
return item | f5ebbaea7cf4152382fb4b2854f68a3320d21fdc | 13,285 |
def stratification(n_subjects_per_strata, n_groups, block_length=4, seed=None):
""" Create a randomization list for each strata using Block Randomization.
If a study has several strata, each strata is seperately randomized using
block randomization.
Args:
n_subjects_per_strata: A list of the number of subjects for each
strata.
n_groups: The number of groups to randomize subjects to.
block_length: The length of the blocks.
seed: (optional) The seed to provide to the RNG.
Returns:
list: a list of length `len(n_subjects_per_strata)` of lists of length
`n_subjects_per_strata`. Each sublist is the strata specific
randomization list.
Notes:
The value of `block_length` should be a multiple of `n_groups`
to ensure proper balance.
Todo:
Allow for multiple randomization techniques to be used.
"""
groups = []
for n_subjects_per_stratum in n_subjects_per_strata:
# Adding 52490, a dummy value, to the seed ensures a different list
# per strata. The use of a 'magic number' here allows for
# reproducibility
if seed is not None:
seed = seed + 52490
groups.append(block(n_subjects_per_stratum, n_groups,
block_length, seed))
return groups | 22675c178b389e6d22586fb6640fb1adc1169996 | 13,286 |
from six import binary_type, text_type
def ensure_binary(s, encoding='utf-8', errors='strict'):
"""Coerce **s** to six.binary_type.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> encoded to `bytes`
- `bytes` -> `bytes`
"""
if isinstance(s, text_type):
return s.encode(encoding, errors)
elif isinstance(s, binary_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s)) | 086d2e50b083a869fff2c06b4da7c04975b19fa3 | 13,287 |
import wx
def FileDialog(prompt='ChooseFile', indir=''):
"""
opens a wx dialog that allows you to select a single
file, and returns the full path/name of that file """
dlg = wx.FileDialog(None,
message = prompt,
defaultDir = indir)
if dlg.ShowModal() == wx.ID_OK:
outfile = dlg.GetPath()
else:
outfile = None
dlg.Destroy()
return outfile | cef7816a40297a5920359b2dba3d7e3e6f6d11ec | 13,288 |
def my_account():
"""
Allows a user to manage their account
"""
user = get_user(login_session['email'])
if request.method == 'GET':
return render_template('myAccount.html', user=user)
else:
new_password1 = request.form.get('userPassword1')
new_password2 = request.form.get('userPassword2')
if new_password1 != new_password2:
flash("Passwords do not match!")
return render_template('myAccount.html', user=user)
user.hash_password(new_password1) # set the new password hash
session.add(user)
session.commit()
flash("Your password has been changed.")
return redirect(url_for('index')) | f3e762931201ed82fa07c7b08c8bc9913c3729dd | 13,289 |
from typing import List
import numpy as np
def __pad_assertwith_0_array4D(grad: 'np.ndarray', pad_nums) -> 'np.ndarray':
    """
    Pad the array with zeros inserted between neighbouring elements along the
    H and W axes (i.e. dilate it).
    :param grad: input tensor of shape (N, C, H, W)
    :param pad_nums: number of zeros inserted between neighbouring elements
    :return: the dilated array
    """
gN, gC, gH, gW = grad.shape
init1 = np.zeros((gN, gC, gH + (gH - 1) * pad_nums, gW), dtype = grad.dtype)
init2 = np.zeros((gN, gC, gH + (gH - 1) * pad_nums, gW + (gW - 1) * pad_nums), dtype = grad.dtype)
boolean: List[int] = [(pad_nums + 1) * i for i in range(grad.shape[2])]
init1[:, :, boolean, :] = grad
boolean: List[int] = [(pad_nums + 1) * i for i in range(grad.shape[3])]
init2[:, :, :, boolean] = init1
return init2 | 3562e42800c25a059f82a0d163e239badefdcfd3 | 13,290 |
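# Hedged worked example for __pad_assertwith_0_array4D() above: a 2x2 spatial
# map with one zero inserted between elements becomes 3x3.
import numpy as np
x = np.arange(1, 5, dtype=float).reshape(1, 1, 2, 2)
print(__pad_assertwith_0_array4D(x, 1)[0, 0])
# [[1. 0. 2.]
#  [0. 0. 0.]
#  [3. 0. 4.]]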
def islist(data):
"""Check if input data is a list."""
return isinstance(data, list) | 98769191b0215f8f863047ccc0c37e4d0af0a444 | 13,291 |
def spatial_mean(xr_da, lon_name="longitude", lat_name="latitude"):
"""
Perform averaging on an `xarray.DataArray` with latitude weighting.
Parameters
----------
xr_da: xarray.DataArray
Data to average
lon_name: str, optional
Name of x-coordinate
lat_name: str, optional
Name of y-coordinate
Returns
-------
xarray.DataArray
Spatially averaged xarray.DataArray.
"""
weights = da.cos(da.deg2rad(xr_da[lat_name]))
res = xr_da.weighted(weights).mean(dim=[lon_name, lat_name])
return res | 5afb6cb9e9a6b88cc3368da4f3544ea9b7c217be | 13,292 |
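# Hedged illustration of the latitude weighting used by spatial_mean() above,
# written directly against numpy/xarray so it does not depend on the module's
# `da` alias (assumed to be an array namespace such as dask.array).
import numpy as np
import xarray as xr
field = xr.DataArray(
    np.ones((3, 4)),
    coords={"latitude": [-60.0, 0.0, 60.0], "longitude": [0.0, 90.0, 180.0, 270.0]},
    dims=("latitude", "longitude"),
)
weights = np.cos(np.deg2rad(field["latitude"]))
print(float(field.weighted(weights).mean(dim=["longitude", "latitude"])))  # 1.0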
def rank(value_to_be_ranked, value_providing_rank):
"""
    Returns the rank of ``value_to_be_ranked`` within the collection
    ``value_providing_rank``. Works even if ``value_providing_rank`` is a
    non-orderable collection (e.g., a set).
A binary search would be an optimized way of doing this if we can constrain
``values`` to be an ordered collection.
"""
num_lesser = [v for v in value_providing_rank if v < value_to_be_ranked]
return len(num_lesser) | 18c2009eb59b62a2a3c63c69d55f84a6f51e5953 | 13,293 |
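# Quick check for rank() above: 3 exceeds two of the values in the set.
print(rank(3, {1, 2, 5, 8}))  # 2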
import numpy as np
import scipy.stats as st
def Characteristics(aVector):
"""
Purpose:
Compute certain characteristic of data in a vector
Inputs:
aVector an array of data
Initialize:
iMean mean
iMed median
iMin minimum
iMax maximum
iKurt kurtosis
iSkew skewness
iStd standard deviation
Return value:
aResults an array with calculated characteristics
"""
iMin = aVector.min().values[0]
iMax = aVector.max().values[0]
iMean = np.mean(aVector).values[0]
iMed = np.median(aVector)
iKurt = st.kurtosis(aVector)[0]
iSkew = st.skew(aVector)[0]
iStd = aVector.std().values[0]
aResults = np.array([iMin,iMax, iMean,iMed,iKurt,iSkew,iStd])
return aResults | 228ba375a7fca9a4e13920e6eadd0dab83b0847c | 13,294 |
import numpy as np
from scipy.special import psi, gammaln
def loglik(alpha,gamma_list,M,k):
"""
Calculate $L_{[\alpha]}$ defined in A.4.2
"""
psi_sum_gamma=np.array(list(map(lambda x: psi(np.sum(x)),gamma_list))).reshape((M,1)) # M*1
psi_gamma=psi(np.array(gamma_list)) # M*k matrix
    # The first term is M*(ln Gamma(sum(alpha)) - sum(ln Gamma(alpha))); the
    # original line misplaced a parenthesis and applied gammaln to the whole difference.
    L=M*(gammaln(np.sum(alpha))-np.sum(gammaln(alpha)))+np.sum((psi_gamma-psi_sum_gamma)*(alpha.reshape((1,k))-1))
return L | 233307f7ef4e350bec162199a5d0cd8c773b4151 | 13,295 |
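# Hedged usage sketch for loglik() above: two documents (M=2), k=3 topics,
# variational Dirichlet parameters gamma and a symmetric alpha; values are
# illustrative only.
alpha = np.ones(3)
gamma_list = [np.array([1.0, 2.0, 3.0]), np.array([2.0, 2.0, 2.0])]
print(loglik(alpha, gamma_list, M=2, k=3))  # equals 2*ln(2) here, since alpha-1 == 0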
import logging
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from gum.indexer import indexer, NotRegistered
logger = logging.getLogger(__name__)
def handle_delete(sender_content_type_pk, instance_pk):
"""Async task to delete a model from the index.
:param instance_pk:
:param sender_content_type_pk:
"""
try:
sender_content_type = ContentType.objects.get(pk=sender_content_type_pk)
sender = sender_content_type.model_class()
instance = sender.objects.get(pk=instance_pk)
except ObjectDoesNotExist:
logger.warning("Object ({}, {}) not found".format(sender_content_type_pk, instance_pk))
return None
try:
mapping_type = indexer.get_mapping_type(sender)
mapping_type.delete_document(instance)
except NotRegistered:
return None
return sender_content_type_pk, instance_pk | 049af2041e3ea33bdfddf81c72aeb95b04e5f60c | 13,296 |
import os
def get_file_hashes(directory_path):
"""Returns hashes of all files in directory tree, excluding files with
extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be built.
Args:
directory_path: str. Root directory of the tree.
Returns:
dict(str, str). Dictionary with keys specifying file paths and values
specifying file hashes.
"""
file_hashes = dict()
python_utils.PRINT(
'Computing hashes for files in %s'
% os.path.join(os.getcwd(), directory_path))
for root, _, filenames in os.walk(
os.path.join(os.getcwd(), directory_path)):
for filename in filenames:
filepath = os.path.join(root, filename)
if should_file_be_built(filepath) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
# The path in hashes.json file is in posix style,
# see the comment above HASHES_JSON_FILENAME for details.
complete_filepath = common.convert_to_posixpath(
os.path.join(root, filename))
relative_filepath = common.convert_to_posixpath(os.path.relpath(
complete_filepath, directory_path))
file_hashes[relative_filepath] = generate_md5_hash(
complete_filepath)
return file_hashes | 95c9e4c3ff0455b784cca083838e3a4b7a0dd496 | 13,297 |
def fixedcase_word(w, truelist=None):
"""Returns True if w should be fixed-case, None if unsure."""
if truelist is not None and w in truelist:
return True
if any(c.isupper() for c in w[1:]):
# tokenized word with noninitial uppercase
return True
if len(w) == 1 and w.isupper() and w not in {'A', 'K', 'N'}:
# single uppercase letter
return True
if len(w) == 2 and w[1] == '.' and w[0].isupper():
# initial with period
return True | 9047866f7117e8b1e4090c8e217c3063cfd37c38 | 13,298 |
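# Quick checks for fixedcase_word() above: true-case tokens return True, while
# ordinary lowercase words fall through and return None ("unsure").
print(fixedcase_word("LSTM"))  # True  (non-initial uppercase)
print(fixedcase_word("B."))    # True  (initial with period)
print(fixedcase_word("the"))   # None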
import torch
from heat.core import types  # assumed: heat's own type module (provides canonical_heat_type), not the stdlib "types"
def linspace(
start,
stop,
num=50,
endpoint=True,
retstep=False,
dtype=None,
split=None,
device=None,
comm=None,
):
"""
Returns num evenly spaced samples, calculated over the interval [start, stop]. The endpoint of the interval can
optionally be excluded.
Parameters
----------
start: scalar, scalar-convertible
The starting value of the sample interval, maybe a sequence if convertible to scalar
stop: scalar, scalar-convertible
The end value of the sample interval, unless is set to False. In that case, the sequence consists of all but the
last of num + 1 evenly spaced samples, so that stop is excluded. Note that the step size changes when endpoint
is False.
num: int, optional
Number of samples to generate, defaults to 50. Must be non-negative.
endpoint: bool, optional
If True, stop is the last sample, otherwise, it is not included. Defaults to True.
retstep: bool, optional
If True, return (samples, step), where step is the spacing between samples.
dtype: dtype, optional
The type of the output array.
split: int, optional
The axis along which the array is split and distributed, defaults to None (no distribution).
device : str, ht.Device or None, optional
Specifies the device the tensor shall be allocated on, defaults to None (i.e. globally set default device).
comm: Communication, optional
Handle to the nodes holding distributed parts or copies of this tensor.
Returns
-------
samples: ht.DNDarray
There are num equally spaced samples in the closed interval [start, stop] or the half-open interval
[start, stop) (depending on whether endpoint is True or False).
step: float, optional
Size of spacing between samples, only returned if retstep is True.
Examples
--------
>>> ht.linspace(2.0, 3.0, num=5)
tensor([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> ht.linspace(2.0, 3.0, num=5, endpoint=False)
tensor([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> ht.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
"""
# sanitize input parameters
start = float(start)
stop = float(stop)
num = int(num)
if num <= 0:
raise ValueError(
"number of samples 'num' must be non-negative integer, but was {}".format(num)
)
step = (stop - start) / max(1, num - 1 if endpoint else num)
# sanitize device and comm
device = devices.sanitize_device(device)
comm = sanitize_comm(comm)
# infer local and global shapes
gshape = (num,)
split = sanitize_axis(gshape, split)
offset, lshape, _ = comm.chunk(gshape, split)
balanced = True
# compose the local tensor
start += offset * step
stop = start + lshape[0] * step - step
data = torch.linspace(start, stop, lshape[0], device=device.torch_device)
if dtype is not None:
data = data.type(types.canonical_heat_type(dtype).torch_type())
# construct the resulting global tensor
ht_tensor = dndarray.DNDarray(
data, gshape, types.canonical_heat_type(data.dtype), split, device, comm, balanced
)
if retstep:
return ht_tensor, step
return ht_tensor | 0597835fae9658f65553496b1272d786678919c2 | 13,299 |