content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import numpy as np
def mtci_vi(imgData, wave, mask=0, bands=[-1,-1,-1]):
"""
Function that calculates the MERIS Terrestrial Chlorophyll Index.
    This function uses wavelengths 753.75, 708.75, and 681.25 nm; the closest bands to these values will be used.
Citation: Dash, J. and Curran, P.J. 2004. The MERIS terrestrial chlorophyll index, International Journal of Remote Sensing, 25(23), 5403–5413.
INPUTS:
1) imgData: an array of hyperspectral data either as 3D [n_row x n_col x n_band] or 2D [n_row x n_band]
2) wave: an array of wavelengths in nanometers that correspond to the n_bands in imgData
3) mask: OPTIONAL - a binary array (same size as imgData) that designates which pixels should be included in analysis. Pixels with 1 are used, while pixels with 0 are not.
4) bands: OPTIONAL - if the user wants to define the bands used in the function provide the band index (not in nm) for each wavelength in this order [681.25, 708.75, 753.75 nm].
OUTPUTS:
1) vi: the calculated spectral index value for each pixel either returned as [n_row x n_col x 1] or [n_row x 1]
02/2020 - Susan Meerdink
"""
# Determine the bands used in function
if len(bands) == 3:
if bands[0] == -1:
idx_681 = (np.abs(wave - 681.25)).argmin()
else:
idx_681 = bands[0]
if bands[1] == -1:
idx_708 = (np.abs(wave - 708.75)).argmin()
else:
idx_708 = bands[1]
if bands[2] == -1:
idx_753 = (np.abs(wave - 753.75)).argmin()
else:
idx_753 = bands[2]
print('MTCI calls for bands 681.25, 708.75, and 753.75 nm. Using bands ' + str(wave[idx_681]) +', '+ str(wave[idx_708])+', '+ str(wave[idx_753]))
else:
        raise Exception('Exactly three band indices must be provided in bands.')
# 3D data, hyperspectral image, [n_row x n_col x n_band]
if imgData.ndim > 2:
data_753 = np.reshape(imgData[:,:,idx_753],[-1,1])
data_708 = np.reshape(imgData[:,:,idx_708],[-1,1])
data_681 = np.reshape(imgData[:,:,idx_681],[-1,1])
# 2D data, flattened hyperspectral data, [n_row x n_band]
else:
data_753 = imgData[:,idx_753]
data_708 = imgData[:,idx_708]
data_681 = imgData[:,idx_681]
# Calculate MTCI
index = (data_753 - data_708)/(data_708 - data_681)
# If data was 3D, reshape the index value back into 3D shape
if imgData.ndim > 2:
index = np.reshape(index,[imgData.shape[0],imgData.shape[1]])
if isinstance(mask, int) is False:
idx_x, idx_y = np.where(mask==0)
index[idx_x,idx_y] = 0
return index | b1f88d2041d8cf9fa645316b47db472e3626f1f8 | 9,906 |
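# Usage sketch for mtci_vi above (illustrative values only; numpy imported as np above):
wave = np.linspace(400.0, 900.0, 200)          # 200 bands in nanometers
cube = np.random.rand(10, 12, 200)             # [n_row x n_col x n_band] reflectance
mtci = mtci_vi(cube, wave)                     # nearest bands to 681.25/708.75/753.75 nm are used
print(mtci.shape)                              # (10, 12)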
def GetSourceFile(file, sourcepath):
"""Return a relative file if it is embedded in a path."""
for root in sourcepath:
if file.find(root) == 0:
prefix_length = len(root)
if not root.endswith('/'):
prefix_length += 1
relative_file = file[prefix_length:]
return relative_file
return None | b241497131c3595f78ebf9d1481c8d9d50887e5a | 9,907 |
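# Usage sketch for GetSourceFile above: the file is returned relative to the first
# matching root in sourcepath, or None if no root is a prefix.
print(GetSourceFile('/src/project/app/main.js', ['/src/project']))   # 'app/main.js'
print(GetSourceFile('/other/main.js', ['/src/project']))             # None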
def refine_gene_list(adata, layer, gene_list, threshold, return_corrs=False):
"""Refines a list of genes by removing those that don't correlate well with the average expression of
those genes
Parameters
----------
adata: an anndata object.
layer: `str` or None (default: `None`)
The layer of data to use for calculating correlation. If None, use adata.X.
gene_list: list of gene names
threshold: threshold on correlation coefficient used to discard genes (expression of each gene is
compared to the bulk expression of the group and any gene with a correlation coefficient less
than this is discarded)
    return_corrs: whether to return the correlation values of the retained genes instead of just the gene names (default: False)
Returns
-------
Refined list of genes that are well correlated with the average expression trend
"""
gene_list, corrs = group_corr(adata, layer, gene_list)
if (return_corrs):
return corrs[corrs >= threshold]
else:
return gene_list[corrs >= threshold] | 0b26b5265bf62a6f771bb762cd3c497fa628c5c3 | 9,910 |
def shape_to_coords(value, precision=6, wkt=False, is_point=False):
"""
Convert a shape (a shapely object or well-known text) to x and y coordinates
suitable for use in Bokeh's `MultiPolygons` glyph.
"""
if is_point:
value = Point(*value).buffer(0.1 ** precision).envelope
x_coords = list()
y_coords = list()
if wkt:
value = loads(value)
if not hasattr(value, '__len__'):
value = [value]
for v in value:
x_dict = dict()
y_dict = dict()
if not hasattr(v, 'exterior'):
v = v.buffer(0)
x_dict['exterior'] = [round(x, precision) for x in v.exterior.coords.xy[0]]
x_dict['holes'] = [[round(y, precision) for y in x.coords.xy[0]] for x in v.interiors]
y_dict['exterior'] = [round(x, precision) for x in v.exterior.coords.xy[1]]
y_dict['holes'] = [[round(y, precision) for y in x.coords.xy[1]] for x in v.interiors]
x_coords.append(x_dict)
y_coords.append(y_dict)
return x_coords, y_coords | 1b585f6bb9831db63b2e0e8c52b6fb29ba0d9ab9 | 9,911 |
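# Usage sketch for shape_to_coords above, assuming shapely is installed; a unit square
# with no holes (the row's own Point/loads imports are only needed for is_point/wkt input).
from shapely.geometry import Polygon
xs, ys = shape_to_coords(Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]))
print(xs[0]['exterior'])   # [0.0, 1.0, 1.0, 0.0, 0.0]
print(xs[0]['holes'])      # []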
def max_union(map_list):
"""
Element-wise maximum of the union of a list of HealSparseMaps.
Parameters
----------
map_list : `list` of `HealSparseMap`
Input list of maps to compute the maximum of
Returns
-------
result : `HealSparseMap`
Element-wise maximum of maps
"""
return _apply_operation(map_list, np.fmax, 0, union=True, int_only=False) | 169fef50486e22468f8942f6968630e8fdef6648 | 9,913 |
import numpy as np
def getDtypes(attributes, forecastHorizon):
"""
    Auxiliary function to generate a dictionary of datatypes for data queried from dynamo.
Parameters
----------
attributes : list,
Attributes queried from dynamo.
forecastHorizon : integer,
Number of forecast horizons which have been queried.
Returns
-------
attributeDtypes : dict,
Dictionary to pass to dataframe to specify dtypes of all data queried.
"""
dtypes = {
"apparentTemperature": np.float64,
"cloudCover": np.float64,
"dewPoint": np.float64,
"humidity": np.float64,
"precipIntensity": np.float64,
"precipProbability": np.float64,
"pressure": np.float64,
"temperature": np.float64,
"uvIndex": np.float64,
"visibility": np.float64,
"windBearing": np.float64,
"windGust": np.float64,
"windSpeed": np.float64,
"carbonFactor": np.float64,
"carbonIndex": str
}
attributeDtypes = dict()
attributeDtypes["unixTimestamp"] = np.int32
for attribute in attributes:
dtype = dtypes[attribute]
for x in range(forecastHorizon+1):
attributeDtypes[attribute + "_" + str(x)] = dtype
return attributeDtypes | 4974b7fe8107b36556da41173508c908785ddf5f | 9,914 |
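# Usage sketch for getDtypes above (attribute names and horizon are illustrative):
dtypes = getDtypes(["temperature", "carbonIndex"], forecastHorizon=2)
print(dtypes)
# {'unixTimestamp': <class 'numpy.int32'>, 'temperature_0': <class 'numpy.float64'>, ...,
#  'carbonIndex_0': <class 'str'>, 'carbonIndex_1': <class 'str'>, 'carbonIndex_2': <class 'str'>}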
def cazy_synonym_dict():
"""Create a dictionary of accepted synonms for CAZy classes."""
cazy_dict = {
"Glycoside Hydrolases (GHs)": ["Glycoside-Hydrolases", "Glycoside-Hydrolases", "Glycoside_Hydrolases", "GlycosideHydrolases", "GLYCOSIDE-HYDROLASES", "GLYCOSIDE-HYDROLASES", "GLYCOSIDE_HYDROLASES", "GLYCOSIDEHYDROLASES", "glycoside-hydrolases", "glycoside-hydrolases", "glycoside_hydrolases", "glycosidehydrolases", "GH", "gh"],
"GlycosylTransferases (GTs)": ["Glycosyl-Transferases", "GlycosylTransferases", "Glycosyl_Transferases", "Glycosyl Transferases", "GLYCOSYL-TRANSFERASES", "GLYCOSYLTRANSFERASES", "GLYCOSYL_TRANSFERASES", "GLYCOSYL TRANSFERASES", "glycosyl-transferases", "glycosyltransferases", "glycosyl_transferases", "glycosyl transferases", "GT", "gt"],
"Polysaccharide Lyases (PLs)": ["Polysaccharide Lyases", "Polysaccharide-Lyases", "Polysaccharide_Lyases", "PolysaccharideLyases", "POLYSACCHARIDE LYASES", "POLYSACCHARIDE-LYASES", "POLYSACCHARIDE_LYASES", "POLYSACCHARIDELYASES", "polysaccharide lyases", "polysaccharide-lyases", "polysaccharide_lyases", "polysaccharidelyases", "PL", "pl"],
"Carbohydrate Esterases (CEs)": ["Carbohydrate Esterases", "Carbohydrate-Esterases", "Carbohydrate_Esterases", "CarbohydrateEsterases", "CARBOHYDRATE ESTERASES", "CARBOHYDRATE-ESTERASES", "CARBOHYDRATE_ESTERASES", "CARBOHYDRATEESTERASES", "carbohydrate esterases", "carbohydrate-esterases", "carbohydrate_esterases", "carbohydrateesterases", "CE", "ce"],
"Auxiliary Activities (AAs)": ["Auxiliary Activities", "Auxiliary-Activities", "Auxiliary_Activities", "AuxiliaryActivities", "AUXILIARY ACTIVITIES", "AUXILIARY-ACTIVITIES", "AUXILIARY_ACTIVITIES", "AUXILIARYACTIVITIES", "auxiliary activities", "auxiliary-activities", "auxiliary_activities", "auxiliaryactivities", "AA", "aa"],
"Carbohydrate-Binding Modules (CBMs)": ["Carbohydrate-Binding-Modules", "Carbohydrate_Binding_Modules", "Carbohydrate_Binding Modules", "CarbohydrateBindingModules", "CARBOHYDRATE-BINDING-MODULES", "CARBOHYDRATE_BINDING_MODULES", "CARBOHYDRATE_BINDING MODULES", "CARBOHYDRATEBINDINGMODULES", "carbohydrate-binding-modules", "carbohydrate_binding_modules", "carbohydrate_binding modules", "carbohydratebindingmodules", "CBMs", "CBM", "cbms", "cbm"]
}
return cazy_dict | 0d635075901cc3e6ba7b432c68e5be3f7d2c34d6 | 9,915 |
def new_oauth2ProviderLimited(pyramid_request):
"""this is used to build a new auth"""
validatorHooks = CustomValidator_Hooks(pyramid_request)
provider = oauth2_provider.OAuth2Provider(
pyramid_request,
validator_api_hooks=validatorHooks,
validator_class=CustomValidator,
server_class=WebApplicationServer,
)
return provider | ef15f43dfa0549431931210d788fd8ccde611634 | 9,916 |
def rand_color(red=(92, 220), green=(92, 220), blue=(92, 220)):
""" Random red, green, blue with the option to limit the ranges.
The ranges are tuples 0..255.
"""
r = rand_byte(red)
g = rand_byte(green)
b = rand_byte(blue)
return f"#{r:02x}{g:02x}{b:02x}" | 43244b5912585a4496abbd6868f97a368fd785f0 | 9,917 |
import base64
def tile_to_html(tile, fig_size=None):
""" Provide HTML string representation of Tile image."""
b64_img_html = '<img src="data:image/png;base64,{}" />'
png_bits = tile_to_png(tile, fig_size=fig_size)
b64_png = base64.b64encode(png_bits).decode('utf-8').replace('\n', '')
return b64_img_html.format(b64_png) | 9e22304c9ee44a850e17930088b0fc81b390fded | 9,918 |
from rdkit import Chem
from rdkit.Chem import rdChemReactions
def generate_buchwald_hartwig_rxns(df):
"""
Converts the entries in the excel files from Sandfort et al. to reaction SMILES.
"""
df = df.copy()
fwd_template = '[F,Cl,Br,I]-[c;H0;D3;+0:1](:[c,n:2]):[c,n:3].[NH2;D1;+0:4]-[c:5]>>[c,n:2]:[c;H0;D3;+0:1](:[c,n:3])-[NH;D2;+0:4]-[c:5]'
methylaniline = 'Cc1ccc(N)cc1'
pd_catalyst = Chem.MolToSmiles(Chem.MolFromSmiles('O=S(=O)(O[Pd]1~[NH2]C2C=CC=CC=2C2C=CC=CC1=2)C(F)(F)F'))
methylaniline_mol = Chem.MolFromSmiles(methylaniline)
rxn = rdChemReactions.ReactionFromSmarts(fwd_template)
products = []
for i, row in df.iterrows():
reacts = (Chem.MolFromSmiles(row['Aryl halide']), methylaniline_mol)
rxn_products = rxn.RunReactants(reacts)
rxn_products_smiles = set([Chem.MolToSmiles(mol[0]) for mol in rxn_products])
assert len(rxn_products_smiles) == 1
products.append(list(rxn_products_smiles)[0])
df['product'] = products
rxns = []
can_smiles_dict = {}
for i, row in df.iterrows():
aryl_halide = canonicalize_with_dict(row['Aryl halide'], can_smiles_dict)
can_smiles_dict[row['Aryl halide']] = aryl_halide
ligand = canonicalize_with_dict(row['Ligand'], can_smiles_dict)
can_smiles_dict[row['Ligand']] = ligand
base = canonicalize_with_dict(row['Base'], can_smiles_dict)
can_smiles_dict[row['Base']] = base
additive = canonicalize_with_dict(row['Additive'], can_smiles_dict)
can_smiles_dict[row['Additive']] = additive
reactants = f"{aryl_halide}.{methylaniline}.{pd_catalyst}.{ligand}.{base}.{additive}"
rxns.append(f"{reactants}>>{row['product']}")
return rxns | 80351743c2f651965735f38b514d7af017fc25ce | 9,919 |
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
from niworkflows.interfaces.itk import MultiApplyTransforms
from niworkflows.interfaces.utility import KeySelect
from niworkflows.interfaces.nibabel import GenerateSamplingReference
from niworkflows.interfaces.nilearn import Merge
from niworkflows.utils.spaces import format_reference
def init_bold_std_trans_wf(
mem_gb,
omp_nthreads,
spaces,
name="bold_std_trans_wf",
use_compression=True,
use_fieldwarp=False,
):
"""
Sample fMRI into standard space with a single-step resampling of the original BOLD series.
.. important::
This workflow provides two outputnodes.
One output node (with name ``poutputnode``) will be parameterized in a Nipype sense
(see `Nipype iterables
<https://miykael.github.io/nipype_tutorial/notebooks/basic_iteration.html>`__), and a
second node (``outputnode``) will collapse the parameterized outputs into synchronous
lists of the output fields listed below.
Workflow Graph
.. workflow::
:graph2use: colored
:simple_form: yes
from niworkflows.utils.spaces import SpatialReferences
from fprodents.workflows.bold.resampling import init_bold_std_trans_wf
wf = init_bold_std_trans_wf(
mem_gb=3,
omp_nthreads=1,
spaces=SpatialReferences(
spaces=['MNI152Lin',
('MNIPediatricAsym', {'cohort': '6'})],
checkpoint=True),
)
Parameters
----------
mem_gb : :obj:`float`
Size of BOLD file in GB
omp_nthreads : :obj:`int`
Maximum number of threads an individual process may use
spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
A container for storing, organizing, and parsing spatial normalizations. Composed of
:py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
(e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
(e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
would lead to resampling on a 2mm resolution of the space).
name : :obj:`str`
Name of workflow (default: ``bold_std_trans_wf``)
use_compression : :obj:`bool`
Save registered BOLD series as ``.nii.gz``
use_fieldwarp : :obj:`bool`
Include SDC warp in single-shot transform from BOLD to MNI
Inputs
------
anat2std_xfm
List of anatomical-to-standard space transforms generated during
spatial normalization.
bold_mask
Skull-stripping mask of reference image
bold_split
Individual 3D volumes, not motion corrected
fieldwarp
a :abbr:`DFM (displacements field map)` in ITK format
hmc_xforms
List of affine transforms aligning each volume to ``ref_image`` in ITK format
bold2anat
Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
name_source
BOLD series NIfTI file
Used to recover original information lost during processing
templates
List of templates that were applied as targets during
spatial normalization.
Outputs
-------
bold_std
BOLD series, resampled to template space
bold_std_ref
Reference, contrast-enhanced summary of the BOLD series, resampled to template space
bold_mask_std
BOLD series mask in template space
template
Template identifiers synchronized correspondingly to previously
described outputs.
"""
workflow = Workflow(name=name)
output_references = spaces.cached.get_spaces(nonstandard=False, dim=(3,))
std_vol_references = [
(s.fullname, s.spec) for s in spaces.references if s.standard and s.dim == 3
]
if len(output_references) == 1:
workflow.__desc__ = """\
The BOLD time-series were resampled into standard space,
generating a *preprocessed BOLD run in {tpl} space*.
""".format(
tpl=output_references[0]
)
elif len(output_references) > 1:
workflow.__desc__ = """\
The BOLD time-series were resampled into several standard spaces,
correspondingly generating the following *spatially-normalized,
preprocessed BOLD runs*: {tpl}.
""".format(
tpl=", ".join(output_references)
)
inputnode = pe.Node(
niu.IdentityInterface(
fields=[
"anat2std_xfm",
"bold_mask",
"bold_split",
"fieldwarp",
"hmc_xforms",
"bold2anat",
"name_source",
"templates",
]
),
name="inputnode",
)
iterablesource = pe.Node(
niu.IdentityInterface(fields=["std_target"]), name="iterablesource"
)
# Generate conversions for every template+spec at the input
iterablesource.iterables = [("std_target", std_vol_references)]
split_target = pe.Node(
niu.Function(
function=_split_spec,
input_names=["in_target"],
output_names=["space", "template", "spec"],
),
run_without_submitting=True,
name="split_target",
)
select_std = pe.Node(
KeySelect(fields=["anat2std_xfm"]),
name="select_std",
run_without_submitting=True,
)
select_tpl = pe.Node(
niu.Function(function=_select_template),
name="select_tpl",
run_without_submitting=True,
)
gen_ref = pe.Node(
GenerateSamplingReference(), name="gen_ref", mem_gb=0.3
) # 256x256x256 * 64 / 8 ~ 150MB)
mask_std_tfm = pe.Node(
ApplyTransforms(interpolation="MultiLabel"), name="mask_std_tfm", mem_gb=1
)
ref_std_tfm = pe.Node(
ApplyTransforms(interpolation="LanczosWindowedSinc"), name="ref_std_tfm", mem_gb=1
)
# Write corrected file in the designated output dir
mask_merge_tfms = pe.Node(
niu.Merge(2),
name="mask_merge_tfms",
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
nxforms = 3 + use_fieldwarp
merge_xforms = pe.Node(
niu.Merge(nxforms),
name="merge_xforms",
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
workflow.connect([(inputnode, merge_xforms, [("hmc_xforms", "in%d" % nxforms)])])
if use_fieldwarp:
workflow.connect([(inputnode, merge_xforms, [("fieldwarp", "in3")])])
bold_to_std_transform = pe.Node(
MultiApplyTransforms(
interpolation="LanczosWindowedSinc", float=True, copy_dtype=True
),
name="bold_to_std_transform",
mem_gb=mem_gb * 3 * omp_nthreads,
n_procs=omp_nthreads,
)
merge = pe.Node(Merge(compress=use_compression), name="merge", mem_gb=mem_gb * 3)
# fmt:off
workflow.connect([
(iterablesource, split_target, [('std_target', 'in_target')]),
(iterablesource, select_tpl, [('std_target', 'template')]),
(inputnode, select_std, [('anat2std_xfm', 'anat2std_xfm'),
('templates', 'keys')]),
(inputnode, mask_std_tfm, [('bold_mask', 'input_image')]),
(inputnode, ref_std_tfm, [('bold_mask', 'input_image')]),
(inputnode, gen_ref, [(('bold_split', _first), 'moving_image')]),
(inputnode, merge_xforms, [
(('bold2anat', _aslist), 'in2')]),
(inputnode, merge, [('name_source', 'header_source')]),
(inputnode, mask_merge_tfms, [(('bold2anat', _aslist), 'in2')]),
(inputnode, bold_to_std_transform, [('bold_split', 'input_image')]),
(split_target, select_std, [('space', 'key')]),
(select_std, merge_xforms, [('anat2std_xfm', 'in1')]),
(select_std, mask_merge_tfms, [('anat2std_xfm', 'in1')]),
(split_target, gen_ref, [(('spec', _is_native), 'keep_native')]),
(select_tpl, gen_ref, [('out', 'fixed_image')]),
(merge_xforms, bold_to_std_transform, [('out', 'transforms')]),
(gen_ref, bold_to_std_transform, [('out_file', 'reference_image')]),
(gen_ref, mask_std_tfm, [('out_file', 'reference_image')]),
(mask_merge_tfms, mask_std_tfm, [('out', 'transforms')]),
(gen_ref, ref_std_tfm, [('out_file', 'reference_image')]),
(mask_merge_tfms, ref_std_tfm, [('out', 'transforms')]),
(bold_to_std_transform, merge, [('out_files', 'in_files')]),
])
# fmt:on
output_names = [
"bold_mask_std",
"bold_std",
"bold_std_ref",
"spatial_reference",
"template",
]
poutputnode = pe.Node(
niu.IdentityInterface(fields=output_names), name="poutputnode"
)
# fmt:off
workflow.connect([
# Connecting outputnode
(iterablesource, poutputnode, [
(('std_target', format_reference), 'spatial_reference')]),
(merge, poutputnode, [('out_file', 'bold_std')]),
(ref_std_tfm, poutputnode, [('output_image', 'bold_std_ref')]),
(mask_std_tfm, poutputnode, [('output_image', 'bold_mask_std')]),
(select_std, poutputnode, [('key', 'template')]),
])
# fmt:on
# Connect parametric outputs to a Join outputnode
outputnode = pe.JoinNode(
niu.IdentityInterface(fields=output_names),
name="outputnode",
joinsource="iterablesource",
)
# fmt:off
workflow.connect([
(poutputnode, outputnode, [(f, f) for f in output_names]),
])
# fmt:on
return workflow | 5953ae62d40002283b41b4289fc45b96b50e319c | 9,920 |
def _get_indent(node):
"""Determine the indentation level of ``node``."""
indent = None
while node:
indent = find_first(node, TOKEN.INDENT)
if indent is not None:
indent = indent.value
break
node = node.parent
return indent | ed54eb8c1ea227534af0a3bd8eda9ab9089755d7 | 9,921 |
def distancesarr(image_centroid, object_centroids):
"""gets the distances between image and objects"""
distances = []
    for j, row in enumerate(object_centroids):
        distance = centroid_distance(image_centroid, object_centroids, j)
        distances.append(distance)
return distances | 7abae0c58a2cc672b789d4c8620878b7e3b46375 | 9,922 |
def obs_agent_has_neighbour(agent_id: int, factory: Factory) -> np.ndarray:
"""Does this agent have a neighbouring node?"""
agent: Table = factory.tables[agent_id]
return np.asarray(
[
agent.node.has_neighbour(Direction.up),
agent.node.has_neighbour(Direction.right),
agent.node.has_neighbour(Direction.down),
agent.node.has_neighbour(Direction.left),
]
) | d91b4d7eabcac6ed71149ad9220c2594e5054e36 | 9,923 |
def P_split_prob(b):
"""Returns the probability of b according to the P_split() distribution.
"""
"""n = b.length
if n <= 2:
p = 1.0
else:
k = 1
# si el arbol es binario y n > 2 seguro que tiene que ser splittable.
#while k < n and not b.splittable(k):
while not b.splittable(k):
k += 1
p = (1.0 / float(n)) * gP_split_prob(b, 0, k) * gP_split_prob(b, k, n)
return p"""
return gP_split_prob(b, b.start_index, b.start_index+b.length) | 94577a96e926686107a154aa82d55ceef6b9ab24 | 9,924 |
def t():
"""Or time(). Returns the number of seconds elapsed since the cartridge was run."""
global begin
return py_time.time() - begin | 1b43767629c9585fcd29c1293ee043a189332ed7 | 9,925 |
from typing import Any
def convert_none(
key: str, attr_type: bool, attr: dict[str, Any] = {}, cdata: bool = False
) -> str:
"""Converts a null value into an XML element"""
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr["type"] = get_xml_type(None)
attrstring = make_attrstring(attr)
return f"<{key}{attrstring}></{key}>" | c04efd6ed52cb092d6987f627b7222668da32dfd | 9,928 |
def is_title(ngram, factor = 2.0):
"""
Define the probability of a ngram to be a title.
    The factor sets the maximum confidence coefficient.
"""
confidence = 1
to_test = [n for n in ngram if n not in stop_words]
for item in to_test:
if item.istitle(): confidence += factor / len(to_test)
# print item, confidence
return confidence | 678959cdafc966d05b5ef213b0727799f20a8e0f | 9,929 |
def ul(microliters):
"""Unicode function name for creating microliter volumes"""
if isinstance(microliters,str) and ':' in microliters:
return Unit(microliters).to('microliter')
return Unit(microliters,"microliter") | 4d5d489191166a76e02cdc0211d52bec45cd65e1 | 9,931 |
import numpy as np
from sklearn.covariance import MinCovDet
def read_glh(filename):
"""
Read glitch parameters.
Parameters
----------
filename : str
Name of file to read
Returns
-------
glhParams : array
Array of median glitch parameters
glhCov : array
Covariance matrix
"""
# Extract glitch parameters
glhFit = np.genfromtxt(filename, skip_header=3)
glhParams = np.zeros(3)
glhParams[0] = np.median(glhFit[:, 8])
glhParams[1] = np.median(glhFit[:, 4])
glhParams[2] = np.median(glhFit[:, 5])
# Compute covariance matrix
tmpFit = np.zeros((len(glhFit[:, 0]), 3))
tmpFit[:, 0] = glhFit[:, 8]
tmpFit[:, 1] = glhFit[:, 4]
tmpFit[:, 2] = glhFit[:, 5]
glhCov = MinCovDet().fit(tmpFit).covariance_
# iglhCov = np.linalg.pinv(glhCov, rcond=1e-8)
return glhParams, glhCov | 6948e0f5571c6d5f7a62dad1fb136cec48e476ae | 9,932 |
def update_user_group(user_group_id, name, **options):
"""
Update a user group
:param user_group_id: The id of the user group to update
:type user_group_id: str
:param name: Name of the user group
:type name: str, optional
:param options: Generic advanced options dict, see online documentation
:type options: dict, optional
:return: The updated group
:rtype: dict
"""
uri = [USER_GROUPS_SUB_PATH, user_group_id]
params = {"name": name}
return _call_account_api("put", uri, params, **options) | 20784b935675c459b7dc258c210aedd86d7b4fb9 | 9,933 |
import numpy as np
from sklearn.cluster import KMeans
def longitudinal_kmeans(X, n_clusters=5, var_reg=1e-3,
fixed_clusters=True, random_state=None):
"""Longitudinal K-Means Algorithm (Genolini and Falissard, 2010)"""
n_time_steps, n_nodes, n_features = X.shape
# vectorize latent positions across time
X_vec = np.moveaxis(X, 0, -1).reshape(n_nodes, n_time_steps * n_features)
# perform normal k-means on the vectorized features
kmeans = KMeans(n_clusters=n_clusters,
random_state=random_state).fit(X_vec)
# this method assigns a single cluster to each point across time.
labels = kmeans.labels_.reshape(-1, 1)
labels = np.hstack([labels] * n_time_steps).T
# un-vectorize centers, shape (n_time_steps, n_centers, n_features)
centers_vec = kmeans.cluster_centers_
if fixed_clusters:
centers = np.empty((n_clusters, n_features))
for k in range(n_clusters):
muk = centers_vec[k].reshape(-1, n_time_steps).T
centers[k] = muk.mean(axis=0) # average position overtime
else:
centers = np.empty((n_time_steps, n_clusters, n_features))
for k in range(n_clusters):
centers[:, k] = centers_vec[k].reshape(-1, n_time_steps).T
# calculate cluster variances (assumed spherical and constant over-time)
variances = np.zeros(n_clusters, dtype=np.float64)
for k in range(n_clusters):
for t in range(n_time_steps):
variances[k] += np.var(X[t][labels[t] == k], axis=0).mean()
variances[k] /= n_time_steps
# clusters with a single data point will have zero-variance.
# assign a fudge factor in this case
variances[variances == 0.] = var_reg
return centers, variances, labels | a76581a7784480fa90afa9ab9e080a09ce5662f4 | 9,934 |
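# Usage sketch for longitudinal_kmeans above (random data, purely illustrative;
# assumes numpy and scikit-learn are available).
X = np.random.rand(4, 60, 2)            # 4 time steps, 60 nodes, 2 latent features
centers, variances, labels = longitudinal_kmeans(X, n_clusters=3, random_state=0)
print(centers.shape, variances.shape, labels.shape)   # (3, 2) (3,) (4, 60)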
import decimal
def do_payment(
checkout_data, # Dict[str, str]
parsed_checkout, # Dict[str, str]
enable_itn, # type: bool
): # type: (...) -> Dict[str, str]
"""
Common test helper: do a payment, and assert results.
This takes a checkout's data and page parse (for session info and assertions).
This will enable and verify ITN processing if `enable_itn` is true.
Return the payment confirmation page's parse.
"""
def _post_payment(): # type: () -> requests.Response
return post_sandbox_payment(
parsed_checkout['session_type'],
parsed_checkout['session_id'],
parsed_checkout['payment_method'],
)
if enable_itn:
require_itn_configured()
with itn_handler(ITN_HOST, ITN_PORT) as itn_queue: # type: Queue
response = _post_payment()
itn_data = itn_queue.get(timeout=2)
else:
response = _post_payment()
parsed_payment = parse_payfast_page(response)
assert {
'payment_summary': parsed_checkout['payment_summary'],
'notice': 'Your payment was successful\n'
} == parsed_payment
if enable_itn:
# Check the ITN result.
# Expect whitespace-stripped versions of the checkout data.
expected = {name: value.strip(api.CHECKOUT_SIGNATURE_IGNORED_WHITESPACE)
for (name, value) in checkout_data.items()}
expected_amount_gross = '{:.2f}'.format(decimal.Decimal(checkout_data['amount'].strip()))
expected_signature = api.itn_signature(itn_data)
assert {
'm_payment_id': expected.get('m_payment_id', ''),
'pf_payment_id': itn_data.get('pf_payment_id', 'MISSING'),
'payment_status': 'COMPLETE',
'item_name': expected.get('item_name', 'MISSING'),
'item_description': expected.get('item_description', ''),
'amount_gross': expected_amount_gross,
'amount_fee': itn_data.get('amount_fee', 'MISSING'),
'amount_net': itn_data.get('amount_net', 'MISSING'),
'custom_str1': expected.get('custom_str1', ''),
'custom_str2': expected.get('custom_str2', ''),
'custom_str3': expected.get('custom_str3', ''),
'custom_str4': expected.get('custom_str4', ''),
'custom_str5': expected.get('custom_str5', ''),
'custom_int1': expected.get('custom_int1', ''),
'custom_int2': expected.get('custom_int2', ''),
'custom_int3': expected.get('custom_int3', ''),
'custom_int4': expected.get('custom_int4', ''),
'custom_int5': expected.get('custom_int5', ''),
# The sandbox seems to fix these names, rather than using the checkout submission data.
'name_first': 'Test',
'name_last': 'User 01',
'email_address': expected.get('email_address', '[email protected]'),
'merchant_id': '10000100',
'signature': expected_signature,
} == itn_data
return parsed_payment | f69383f779ce68ef28ced79d794479a4e3a4dff9 | 9,935 |
def ensure_sphinx_astropy_installed():
"""
Make sure that sphinx-astropy is available.
This returns the available version of sphinx-astropy as well as any
paths that should be added to sys.path for sphinx-astropy to be available.
"""
# We've split out the Sphinx part of astropy-helpers into sphinx-astropy
# but we want it to be auto-installed seamlessly for anyone using
# build_docs. We check if it's already installed, and if not, we install
# it to a local .eggs directory and add the eggs to the path (these
# have to each be added to the path, we can't add them by simply adding
# .eggs to the path)
sys_path_inserts = []
sphinx_astropy_version = None
    try:
        from sphinx_astropy import __version__ as sphinx_astropy_version  # noqa
    except ImportError:
raise ImportError("sphinx-astropy needs to be installed to build "
"the documentation.")
return sphinx_astropy_version, sys_path_inserts | f20911b11beaf3483d1f2f829c63d654cb0557ef | 9,936 |
def SPEED_OF_LIGHT():
"""
The `SPEED_OF_LIGHT` function returns the speed of light in vacuum
(unit is ms-1) according to the IERS numerical standards (2010).
"""
return 299792458.0 | 5f0b6e6fb81018983d541a6492eb2c5aac258ff6 | 9,937 |
def filter_by_is_awesome(resources):
"""The resources being that is_awesome
Arguments:
resources {[type]} -- A list of resources
"""
return [resource for resource in resources if resource.is_awesome] | 46717a93e75dfed53bba03b5b7f8a5e8b8315876 | 9,939 |
import numpy as np
def topograph_image(image, step):
"""
    Takes in an NxMxC numpy matrix and a step size
    returns an NxMxC numpy matrix with contours in each C cell
"""
step_gen = _step_range_gen(step)
new_img = np.array(image, copy=True)
"""step_gen ~ (255, 245, 235, 225,...) """
def myfunc(color):
for tops, bots in window(step_gen, 2):
if (color <= tops) and (color > bots):
return tops
if color > tops:
break
return 0
topograph = np.vectorize(myfunc)
return new_img if step == 1 else topograph(new_img) | c3a340c422bb16de83b132506e975fecf21a335c | 9,940 |
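# topograph_image relies on the module's private _step_range_gen/window helpers, which are
# not shown here; the following self-contained sketch reproduces the same quantization idea:
# each value is snapped to the top of the step interval that contains it.
levels = np.arange(255, 0, -50)                         # (255, 205, 155, 105, 55)
snap = np.vectorize(lambda c: next((t for t in levels if t - 50 < c <= t), 0))
print(snap(np.array([[250, 123], [37, 5]])))            # [[255 155], [55 0]]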
def _etag(cur):
"""Get current history ETag during request processing."""
h_from, h_until = web.ctx.ermrest_history_snaprange
cur.execute(("SELECT _ermrest.tstzencode( GREATEST( %(h_until)s::timestamptz, (" + _RANGE_AMENDVER_SQL + ")) );") % {
'h_from': sql_literal(h_from),
'h_until': sql_literal(h_until),
})
return cur.fetchone()[0] | bd04dca4ef140003c0df867fa258beb5c60c77dd | 9,941 |
def MakeListOfPoints(charts, bot, test_name, buildername,
buildnumber, supplemental_columns):
"""Constructs a list of point dictionaries to send.
The format output by this function is the original format for sending data
to the perf dashboard.
Args:
charts: A dictionary of chart names to chart data, as generated by the
log processor classes (see process_log_utils.GraphingLogProcessor).
bot: A string which comes from perf_id, e.g. linux-release.
test_name: A test suite name, e.g. sunspider.
buildername: Builder name (for stdio links).
buildnumber: Build number (for stdio links).
supplemental_columns: A dictionary of extra data to send with a point.
Returns:
A list of dictionaries in the format accepted by the perf dashboard.
Each dictionary has the keys "master", "bot", "test", "value", "revision".
The full details of this format are described at http://goo.gl/TcJliv.
"""
results = []
# The master name used for the dashboard is the CamelCase name returned by
# GetActiveMaster(), and not the canonical master name with dots.
master = slave_utils.GetActiveMaster()
for chart_name, chart_data in sorted(charts.items()):
point_id, revision_columns = _RevisionNumberColumns(chart_data, prefix='r_')
for trace_name, trace_values in sorted(chart_data['traces'].items()):
is_important = trace_name in chart_data.get('important', [])
test_path = _TestPath(test_name, chart_name, trace_name)
result = {
'master': master,
'bot': bot,
'test': test_path,
'revision': point_id,
'supplemental_columns': {}
}
# Add the supplemental_columns values that were passed in after the
# calculated revision column values so that these can be overwritten.
result['supplemental_columns'].update(revision_columns)
result['supplemental_columns'].update(
_GetStdioUriColumn(test_name, buildername, buildnumber))
result['supplemental_columns'].update(supplemental_columns)
result['value'] = trace_values[0]
result['error'] = trace_values[1]
# Add other properties to this result dictionary if available.
if chart_data.get('units'):
result['units'] = chart_data['units']
if is_important:
result['important'] = True
results.append(result)
return results | fe903667b0e3a4c381dbcbc3205ba87b2d0ef26b | 9,943 |
import csv
from io import StringIO
def parse_csv(string):
"""
Rough port of wq/pandas.js to Python. Useful for validating CSV output
generated by Django REST Pandas.
"""
if not string.startswith(','):
data = []
for row in csv.DictReader(StringIO(string)):
for key, val in row.items():
try:
row[key] = float(val)
except ValueError:
pass
data.append(row)
return [{
'data': data
}]
reader = csv.reader(StringIO(string))
val_cols = None
val_start = None
id_cols = None
for row in reader:
if row[0] == '' and not val_cols:
val_start = row.count('')
val_cols = row[val_start:]
col_meta = [{} for v in val_cols]
elif row[-1] != '' and val_cols and not id_cols:
key = row[0]
for i, meta in enumerate(row[val_start:]):
col_meta[i].update(**{key: meta})
elif row[-1] == '' and not id_cols:
id_cols = row[:row.index('')]
meta_index = {}
meta_i = 0
datasets = []
for i, ds1 in enumerate(col_meta):
if i in meta_index:
continue
meta_index[i] = meta_i
meta_i += 1
datasets.append(ds1)
if i < len(col_meta):
for j, ds2 in enumerate(col_meta[i + 1:]):
if ds1 == ds2:
meta_index[i + j + 1] = i
for d in datasets:
d['data'] = []
elif val_cols and id_cols:
ids = {
key: val
for key, val in zip(id_cols, row[:len(id_cols)])
}
records = {}
for i, val in enumerate(row[len(id_cols):]):
mi = meta_index[i]
if mi not in records:
data = ids.copy()
else:
data = records[mi]
try:
val = float(val)
except ValueError:
pass
if val != '':
data[val_cols[i]] = val
records[mi] = data
for mi, data in records.items():
datasets[mi]['data'].append(data)
return datasets | bdf32e3ff1a2d63c568200e75d5f694ef5f49ce9 | 9,944 |
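# Usage sketch for parse_csv above: the simple (non-pivoted) branch, where the CSV does not
# start with a comma, returns a single dataset with floats parsed where possible.
print(parse_csv("date,value\n2015-01-01,0.5\n2015-01-02,0.4\n"))
# [{'data': [{'date': '2015-01-01', 'value': 0.5}, {'date': '2015-01-02', 'value': 0.4}]}]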
def list_system_configurations():
"""
List all the system configuration parameters
Returns:
.. code-block:: python
[
{
"ParameterName": "ParameterValue"
},
...
]
Raises:
500 - ChaliceViewError
"""
try:
print("Listing all the system configuration parameters")
system_table = ddb_resource.Table(SYSTEM_TABLE_NAME)
response = system_table.scan(
ConsistentRead=True
)
configs = response["Items"]
while "LastEvaluatedKey" in response:
response = system_table.scan(
ExclusiveStartKey=response["LastEvaluatedKey"],
ConsistentRead=True
)
configs.extend(response["Items"])
except Exception as e:
print(f"Unable to list the system configuration parameters: {str(e)}")
raise ChaliceViewError(f"Unable to list the system configuration parameters: {str(e)}")
else:
return replace_decimals(configs) | 5989cc6f1bd79e5f7bd4889883dccb7fa9bf1bd4 | 9,945 |
def add_dbnsfp_to_vds(hail_context, vds, genome_version, root="va.dbnsfp", subset=None, verbose=True):
"""Add dbNSFP fields to the VDS"""
if genome_version == "37":
dbnsfp_schema = DBNSFP_SCHEMA_37
elif genome_version == "38":
dbnsfp_schema = DBNSFP_SCHEMA_38
else:
raise ValueError("Invalid genome_version: " + str(genome_version))
expr = convert_vds_schema_string_to_annotate_variants_expr(
root=root,
other_source_fields=dbnsfp_schema,
other_source_root="vds",
)
if verbose:
print(expr)
dbnsfp_vds = read_dbnsfp_vds(hail_context, genome_version, subset=subset)
return vds.annotate_variants_vds(dbnsfp_vds, expr=expr) | f3c652c77858b9e859bd47e48002a1de3d865fa0 | 9,946 |
import torch
def get_wav2vec_preds_for_wav(
path_to_wav: str,
model,
processor,
device: torch.device,
bs: int = 8,
loading_step: float = 10,
extra_step: float = 1,
) -> str:
"""
Gets binary predictions for wav file with a wav2vec 2.0 model
Args:
path_to_wav (str): absolute path to wav file
model: a wav2vec 2.0 model
processor: a wav2vec 2.0 processor
device: a torch.device object
bs (int, optional): Batch size. Defaults to 8.
loading_step (float, optional): length of fixed segments. Defaults to 10.
extra_step (float, optional): size of extra step to load before and after.
Defaults to 1.
Returns:
str: binary predictions
"""
def my_collate_fn(batch: list[np.array]) -> list[np.array]:
return [example for example in batch]
dataset = TokenPredDataset(path_to_wav, extra_step, loading_step)
dataloader = DataLoader(
dataset,
batch_size=bs,
shuffle=False,
collate_fn=my_collate_fn,
num_workers=min(cpu_count() // 2, 4),
drop_last=False,
)
# for the extra frames loaded before and after each segment
correction = int(extra_step / dataset.wav2vec_frame_length)
all_preds = []
i = 0
with torch.no_grad():
for wav_batch in iter(dataloader):
tokenized_audio = processor(
wav_batch, return_tensors="pt", padding="longest", sampling_rate=16000
)
input_values = tokenized_audio.input_values.to(device)
attention_mask = tokenized_audio.attention_mask.to(device)
logits = model(input_values, attention_mask=attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
for j, preds in enumerate(predicted_ids.tolist()):
true_length = (
attention_mask[j].cpu().numpy().sum()
/ dataset.sr
/ dataset.wav2vec_frame_length
)
# apply corrections
if i == 0:
preds = preds[:-correction]
true_length -= correction
elif i == len(dataset) - 1:
preds = preds[correction:]
true_length -= correction
else:
preds = preds[correction:-correction]
true_length -= 2 * correction
# remove padding
all_preds.extend(preds[: int(true_length)])
i += 1
tokens_preds = processor.tokenizer.convert_ids_to_tokens(all_preds)
predictions = "".join(["0" if char == "<pad>" else "1" for char in tokens_preds])
return predictions | 2f9abc97559d1853631dcdf79599190714f618c8 | 9,948 |
def header_lines(filename):
"""Read the first five lines of a file and return them as a list of strings."""
with open(filename, mode='rb') as f:
return [f.readline().decode().rstrip() for _ in range(5)] | 35056152c1566ea2d14452308f00d6903b6e4dff | 9,951 |
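# Usage sketch for header_lines above: write a small file and read back its first five lines.
import os
import tempfile
path = os.path.join(tempfile.mkdtemp(), "example.txt")
with open(path, "w") as f:
    f.write("\n".join(f"line {i}" for i in range(10)))
print(header_lines(path))   # ['line 0', 'line 1', 'line 2', 'line 3', 'line 4']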
async def load_last_cotd(chat_id: int):
"""Load the time when the user has last received his card of the day.
Args:
chat_id (int): user chat_id
"""
QUERY = "SELECT last_cotd FROM users WHERE id = %(id)s"
async with aconn.cursor() as cur:
await cur.execute(QUERY, {"id": chat_id})
record = await cur.fetchone()
return record[0] if record else None | 2e2aabc18a014e9f96fee91f6e8d85b875edcf2a | 9,953 |
from pathlib import Path
import json
import torch
def load_model(targets, model_name='umxhq', device='cpu'):
"""
target model path can be either <target>.pth, or <target>-sha256.pth
(as used on torchub)
"""
model_path = Path(model_name).expanduser()
if not model_path.exists():
raise NotImplementedError
else:
# load model from disk
with open(Path(model_path, str(len(targets)) + '.json'), 'r') as stream:
results = json.load(stream)
target_model_path = Path(model_path) / "model.pth"
state = torch.load(
target_model_path,
map_location=device
)
max_bin = utils.bandwidth_to_max_bin(
44100,
results['args']['nfft'],
results['args']['bandwidth']
)
unmix = model.OpenUnmixSingle(
n_fft=results['args']['nfft'],
n_hop=results['args']['nhop'],
nb_channels=results['args']['nb_channels'],
hidden_size=results['args']['hidden_size'],
max_bin=max_bin
)
unmix.load_state_dict(state)
unmix.stft.center = True
unmix.eval()
unmix.to(device)
print('loadmodel function done')
return unmix | 8fdafa6ac28ed2277337dc1f3ded295668963c8a | 9,954 |
from typing import Callable
from typing import Optional
from typing import Tuple
from typing import List
import scipy
def model_gradient_descent(
f: Callable[..., float],
x0: np.ndarray,
*,
args=(),
rate: float = 1e-1,
sample_radius: float = 1e-1,
n_sample_points: int = 100,
n_sample_points_ratio: Optional[float] = None,
rate_decay_exponent: float = 0.0,
stability_constant: float = 0.0,
sample_radius_decay_exponent: float = 0.0,
tol: float = 1e-8,
known_values: Optional[Tuple[List[np.ndarray], List[float]]] = None,
max_iterations: Optional[int] = None,
max_evaluations: Optional[int] = None) -> scipy.optimize.OptimizeResult:
"""Model gradient descent algorithm for black-box optimization.
The idea of this algorithm is to perform gradient descent, but estimate
the gradient using a surrogate model instead of, say, by
finite-differencing. The surrogate model is a least-squared quadratic
fit to points sampled from the vicinity of the current iterate.
This algorithm works well when you have an initial guess which is in the
convex neighborhood of a local optimum and you want to converge to that
local optimum. It's meant to be used when the function is stochastic.
Args:
f: The function to minimize.
x0: An initial guess.
args: Additional arguments to pass to the function.
rate: The learning rate for the gradient descent.
sample_radius: The radius around the current iterate to sample
points from to build the quadratic model.
n_sample_points: The number of points to sample in each iteration.
n_sample_points_ratio: This specifies the number of points to sample
in each iteration as a coefficient of the number of points
required to exactly determine a quadratic model. The number
of sample points will be this coefficient times (n+1)(n+2)/2,
rounded up, where n is the number of parameters.
Setting this overrides n_sample_points.
rate_decay_exponent: Controls decay of learning rate.
In each iteration, the learning rate is changed to the
base learning rate divided by (i + 1 + S)**a, where S
is the stability constant and a is the rate decay exponent
(this parameter).
stability_constant: Affects decay of learning rate.
In each iteration, the learning rate is changed to the
base learning rate divided by (i + 1 + S)**a, where S
is the stability constant (this parameter) and a is the rate decay
exponent.
sample_radius_decay_exponent: Controls decay of sample radius.
tol: The algorithm terminates when the difference between the current
iterate and the next suggested iterate is smaller than this value.
known_values: Any prior known values of the objective function.
This is given as a tuple where the first element is a list
of points and the second element is a list of the function values
at those points.
max_iterations: The maximum number of iterations to allow before
termination.
max_evaluations: The maximum number of function evaluations to allow
before termination.
Returns:
Scipy OptimizeResult
"""
if known_values is not None:
known_xs, known_ys = known_values
known_xs = [np.copy(x) for x in known_xs]
known_ys = [np.copy(y) for y in known_ys]
else:
known_xs, known_ys = [], []
if max_iterations is None:
max_iterations = np.inf
if max_evaluations is None:
max_evaluations = np.inf
n = len(x0)
if n_sample_points_ratio is not None:
n_sample_points = int(
np.ceil(n_sample_points_ratio * (n + 1) * (n + 2) / 2))
_, f = wrap_function(f, args)
res = OptimizeResult()
current_x = np.copy(x0)
res.x_iters = [] # initializes as lists
res.xs_iters = []
res.ys_iters = []
res.func_vals = []
res.model_vals = [None]
res.fun = 0
total_evals = 0
num_iter = 0
converged = False
message = None
while num_iter < max_iterations:
current_sample_radius = (sample_radius /
(num_iter + 1)**sample_radius_decay_exponent)
# Determine points to evaluate
# in ball around current point
new_xs = [np.copy(current_x)] + [
current_x + _random_point_in_ball(n, current_sample_radius)
for _ in range(n_sample_points)
]
if total_evals + len(new_xs) > max_evaluations:
message = 'Reached maximum number of evaluations.'
break
# Evaluate points
res.xs_iters.append(new_xs)
new_ys = [f(x) for x in new_xs]
res.ys_iters.append(new_ys)
total_evals += len(new_ys)
known_xs.extend(new_xs)
known_ys.extend(new_ys)
# Save function value
res.func_vals.append(new_ys[0])
res.x_iters.append(np.copy(current_x))
res.fun = res.func_vals[-1]
# Determine points to use to build model
model_xs = []
model_ys = []
for x, y in zip(known_xs, known_ys):
if np.linalg.norm(x - current_x) < current_sample_radius:
model_xs.append(x)
model_ys.append(y)
# Build and solve model
model_gradient, model = _get_least_squares_model_gradient(
model_xs, model_ys, current_x)
# calculate the gradient and update the current point
gradient_norm = np.linalg.norm(model_gradient)
decayed_rate = (
rate / (num_iter + 1 + stability_constant)**rate_decay_exponent)
# Convergence criteria
if decayed_rate * gradient_norm < tol:
converged = True
message = 'Optimization converged successfully.'
break
# Update
current_x -= decayed_rate * model_gradient
res.model_vals.append(
model.predict([-decayed_rate * model_gradient])[0])
num_iter += 1
if converged:
final_val = res.func_vals[-1]
else:
final_val = f(current_x)
res.func_vals.append(final_val)
if message is None:
message = 'Reached maximum number of iterations.'
res.x_iters.append(current_x)
total_evals += 1
res.x = current_x
res.fun = final_val
res.nit = num_iter
res.nfev = total_evals
res.message = message
return res | d5bd32f21cdc871175c3f4c1601c1da240866e14 | 9,955 |
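# model_gradient_descent above depends on this module's private helpers (wrap_function,
# _random_point_in_ball, _get_least_squares_model_gradient), which are not shown here.
# The following self-contained sketch illustrates the core idea on a noisy quadratic:
# sample points around x0, fit a least-squares quadratic surrogate, and step along the
# negative model gradient. All names and values below are illustrative.
import numpy as np

rng = np.random.default_rng(0)
f = lambda x: np.sum((x - 1.0) ** 2) + rng.normal(scale=0.01)   # noisy objective
x0 = np.zeros(2)
radius, n_samples, rate = 0.1, 30, 0.5

d = radius * rng.uniform(-1.0, 1.0, size=(n_samples, 2))        # offsets around x0
ys = np.array([f(x0 + di) for di in d])
# Quadratic design matrix: [1, d1, d2, d1^2, d1*d2, d2^2]
A = np.column_stack([np.ones(n_samples), d, d[:, 0] ** 2, d[:, 0] * d[:, 1], d[:, 1] ** 2])
coef, *_ = np.linalg.lstsq(A, ys, rcond=None)
model_gradient = coef[1:3]                  # gradient of the surrogate at x0
x1 = x0 - rate * model_gradient             # one model-gradient-descent step
print(model_gradient, x1)                   # gradient ~ [-2, -2], x1 ~ [1, 1]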
def index():
"""Show the index."""
return render_template(
"invenio_archivematica/index.html",
module_name=_('Invenio-Archivematica')) | 9c5e62bc29466bd4eae463d1dcd71c0d880fc5f8 | 9,956 |
import re
def word_detokenize(tokens):
"""
A heuristic attempt to undo the Penn Treebank tokenization above. Pass the
--pristine-output flag if no attempt at detokenizing is desired.
"""
regexes = [
# Newlines
(re.compile(r'[ ]?\\n[ ]?'), r'\n'),
# Contractions
(re.compile(r"\b(can)\s(not)\b"), r'\1\2'),
(re.compile(r"\b(d)\s('ye)\b"), r'\1\2'),
(re.compile(r"\b(gim)\s(me)\b"), r'\1\2'),
(re.compile(r"\b(gon)\s(na)\b"), r'\1\2'),
(re.compile(r"\b(got)\s(ta)\b"), r'\1\2'),
(re.compile(r"\b(lem)\s(me)\b"), r'\1\2'),
(re.compile(r"\b(mor)\s('n)\b"), r'\1\2'),
(re.compile(r"\b(wan)\s(na)\b"), r'\1\2'),
# Ending quotes
(re.compile(r"([^' ]) ('ll|'re|'ve|n't)\b"), r"\1\2"),
(re.compile(r"([^' ]) ('s|'m|'d)\b"), r"\1\2"),
(re.compile(r'[ ]?”'), r'"'),
# Double dashes
(re.compile(r'[ ]?--[ ]?'), r'--'),
# Parens and brackets
(re.compile(r'([\[\(\{\<]) '), r'\1'),
(re.compile(r' ([\]\)\}\>])'), r'\1'),
(re.compile(r'([\]\)\}\>]) ([:;,.])'), r'\1\2'),
# Punctuation
(re.compile(r"([^']) ' "), r"\1' "),
(re.compile(r' ([?!\.])'), r'\1'),
(re.compile(r'([^\.])\s(\.)([\]\)}>"\']*)\s*$'), r'\1\2\3'),
(re.compile(r'([#$]) '), r'\1'),
(re.compile(r' ([;%:,])'), r'\1'),
# Starting quotes
(re.compile(r'(“)[ ]?'), r'"')
]
text = ' '.join(tokens)
for regexp, substitution in regexes:
text = regexp.sub(substitution, text)
return text.strip() | 577c2ed235aaf889699efc291d2b206a922f1f4a | 9,959 |
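# Usage sketch for word_detokenize above, on a Penn-Treebank-style token stream:
tokens = ['I', 'ca', "n't", 'believe', 'it', 'works', ',', 'can', 'you', '?']
print(word_detokenize(tokens))   # I can't believe it works, can you?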
def googlenet_paper(pretrained=False, **kwargs):
"""
GoogLeNet Model as given in the official Paper.
"""
kwargs['aux'] = True if 'aux' not in kwargs else kwargs['aux']
kwargs['replace5x5with3x3'] = False if 'replace5x5with3x3' not in kwargs \
else kwargs['replace5x5with3x3']
return get_net(GoogLeNet, pretrained=pretrained, pretrain_url=None,
fname='googlenet', kwargs_net=kwargs, attr='classifier',
inn=1024) | 01eaf2cf89648b334f634e83ca2d774e58970999 | 9,960 |
def is_regex(obj):
"""Cannot do type check against SRE_Pattern, so we use duck typing."""
return hasattr(obj, 'match') and hasattr(obj, 'pattern') | cfd4fc702fb121735f49d4ba61395ce8f6508b1a | 9,963 |
import functools
def GetDefaultScopeLister(compute_client, project=None):
"""Constructs default zone/region lister."""
scope_func = {
compute_scope.ScopeEnum.ZONE:
functools.partial(zones_service.List, compute_client),
compute_scope.ScopeEnum.REGION:
functools.partial(regions_service.List, compute_client),
compute_scope.ScopeEnum.GLOBAL: lambda _: [ResourceStub(name='')]
}
def Lister(scopes, _):
prj = project or properties.VALUES.core.project.Get(required=True)
results = {}
for scope in scopes:
results[scope] = scope_func[scope](prj)
return results
return Lister | 25069007b68a74b26e2767e146c25466b65e3377 | 9,964 |
def find_user(username):
"""
Function that will find a user by their username and return the user
"""
return User.find_by_username(username) | ef036f0df72bbdcb9aa8db519120209e20678e83 | 9,965 |
from typing import List
def filter_by_author(resources: List[Resource], author: Author) -> List[Resource]:
"""The resources by the specified author
Arguments:
resources {List[Resource]} -- A list of resources
"""
return [resource for resource in resources if resource.author == author] | d03673ed8c45f09996e29eb996fc31fa3d073315 | 9,966 |
import time
def cpu_bound_op(exec_time, *data):
"""
Simulation of a long-running CPU-bound operation
:param exec_time: how long this operation will take
:param data: data to "process" (sum it up)
:return: the processed result
"""
logger.info("Running cpu-bound op on {} for {} seconds".format(data, exec_time))
time.sleep(exec_time)
return sum(data) | a52d3e25a75f9c7b0ab680a9ad1cb0e5d40de92a | 9,967 |
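# Usage sketch for cpu_bound_op above; the function expects a module-level `logger`,
# so a basic one is configured here for illustration.
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
print(cpu_bound_op(0.1, 1, 2, 3))   # 6, after sleeping ~0.1 s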
import cv2
import numpy as np
def elastic_transform_approx(
img,
alpha,
sigma,
alpha_affine,
interpolation=cv2.INTER_LINEAR,
border_mode=cv2.BORDER_REFLECT_101,
value=None,
random_state=None,
):
"""Elastic deformation of images as described in [Simard2003]_ (with modifications for speed).
Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
"""
if random_state is None:
random_state = np.random.RandomState(1234)
height, width = img.shape[:2]
# Random affine
center_square = np.float32((height, width)) // 2
square_size = min((height, width)) // 3
alpha = float(alpha)
sigma = float(sigma)
alpha_affine = float(alpha_affine)
pts1 = np.float32(
[
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size,
]
)
pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
matrix = cv2.getAffineTransform(pts1, pts2)
warp_fn = _maybe_process_in_chunks(
cv2.warpAffine, M=matrix, dsize=(width, height), flags=interpolation, borderMode=border_mode, borderValue=value
)
img = warp_fn(img)
dx = random_state.rand(height, width).astype(np.float32) * 2 - 1
cv2.GaussianBlur(dx, (17, 17), sigma, dst=dx)
dx *= alpha
dy = random_state.rand(height, width).astype(np.float32) * 2 - 1
cv2.GaussianBlur(dy, (17, 17), sigma, dst=dy)
dy *= alpha
x, y = np.meshgrid(np.arange(width), np.arange(height))
map_x = np.float32(x + dx)
map_y = np.float32(y + dy)
remap_fn = _maybe_process_in_chunks(
cv2.remap, map1=map_x, map2=map_y, interpolation=interpolation, borderMode=border_mode, borderValue=value
)
return remap_fn(img) | 9684847e756e0299be6766b5bb8220e6a1b4fc8d | 9,968 |
def jitter(t, X, amountS):
"""Return a random number (intended as a time offset, i.e. jitter) within the range +/-amountS
The jitter is different (but constant) for any given day in t (epoch secs)
and for any value X (which might be e.g. deviceID)"""
dt = ISO8601.epoch_seconds_to_datetime(t)
dayOfYear = int(dt.strftime("%j"))
year = int(dt.strftime("%Y"))
uniqueValue = year*367+dayOfYear+abs(hash(X)) # Note that hash is implementation-dependent so may give different results on different platforms
rand = utils.hashIt(uniqueValue,100)
sign = int(str(uniqueValue)[0]) < 5
v = (rand / 100.0) * amountS
if sign:
v = -v
return v | db62c4365bf4cbf9d2ed0587c51846a274a691a4 | 9,970 |
def check_DNA(DNA_sequence):
"""Check that we have a DNA sequence without junk"""
#
    # Remove all spaces
    DNA_sequence = DNA_sequence.replace(' ', '')
    # Upper case
    DNA_sequence = DNA_sequence.upper()
# Check that we only have DNA bases in the seq
ok=1
garbage={}
DNA_bases=['A','G','C','T']
for letter in DNA_sequence:
if not letter in DNA_bases:
ok=None
garbage[letter]=1
if ok:
return ok, DNA_sequence
    return ok, list(garbage.keys())
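# Usage sketch for check_DNA above: a clean sequence is returned upper-cased, while a
# sequence containing junk characters returns None plus the offending letters.
print(check_DNA("atg cat"))   # (1, 'ATGCAT')
print(check_DNA("atgxcat"))   # (None, ['X'])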
def read():
"""
read() : Fetches documents from Firestore collection as JSON
warehouse : Return document that matches query ID
all_warehouses : Return all documents
"""
try:
warehouse_id = request.args.get('id')
if warehouse_id:
warehouse = warehouse_ref.document(warehouse_id).get()
return jsonify(warehouse.to_dict()), 200
else:
all_warehouses = [doc.to_dict() for doc in warehouse_ref.stream()]
return jsonify(all_warehouses), 200
except Exception as e:
return f"An Error Occured: {e}" | 17d3622f9f0770edb333907298112487432c7025 | 9,972 |
import numpy as np
def transposeC(array, axes=None):
"""
Returns the (conjugate) transpose of the input `array`.
Parameters
----------
array : array_like
Input array that needs to be transposed.
Optional
--------
axes : 1D array_like of int or None. Default: None
If *None*, reverse the dimensions.
Else, permute the axes according to the values given.
Returns
-------
array_t : :obj:`~numpy.ndarray` object
Input `array` with its axes transposed.
Examples
--------
Using an array with only real values returns its transposed variant:
>>> array = np.array([[1, 2.5], [3.5, 5]])
>>> array
array([[ 1. , 2.5],
[ 3.5, 5. ]])
>>> transposeC(array)
array([[ 1. , 3.5],
[ 2.5, 5. ]])
And using an array containing complex values returns its conjugate
transposed:
>>> array = np.array([[1, -2+4j], [7.5j, 0]])
>>> array
array([[ 1.+0.j , -2.+4.j ],
[ 0.+7.5j, 0.+0.j ]])
>>> transposeC(array)
array([[ 1.-0.j , 0.-7.5j],
[-2.-4.j , 0.-0.j ]])
"""
# Take the transpose of the conjugate or the input array and return it
return(np.transpose(np.conjugate(array), axes)) | fecd60d72c4c38dc87d59f430365cefe72f40ef4 | 9,973 |
from collections import defaultdict
from typing import List
import numpy as np
def _partition_files(files: List[str], num_partitions: int) -> List[List[str]]:
"""Split files into num_partitions partitions of close to equal size"""
id_to_file = defaultdict(list)
for f in files:
id_to_file[_sample_id_from_path(f)[0]].append(f)
sample_ids = np.array(list(id_to_file))
np.random.shuffle(sample_ids)
split_ids = np.array_split(sample_ids, num_partitions)
splits = [
sum((id_to_file[sample_id] for sample_id in split), []) for split in split_ids
]
return [split for split in splits if split] | e9fac329f8e1c1c7682984216c34e7b259776c82 | 9,974 |
import json
from pathlib import Path
import time
import requests
def run_vscode_command(
command: str,
*args: str,
wait_for_finish: bool = False,
expect_response: bool = False,
decode_json_arguments: bool = False,
):
"""Execute command via vscode command server."""
# NB: This is a hack to work around the fact that talon doesn't support
# variable argument lists
args = list(
filter(
lambda x: x is not NotSet,
args,
)
)
if decode_json_arguments:
args = [json.loads(arg) for arg in args]
port_file_path = Path(gettempdir()) / "vscode-port"
original_contents = port_file_path.read_text()
# Issue command to VSCode telling it to update the port file. Because only
# the active VSCode instance will accept keypresses, we can be sure that
# the active VSCode instance will be the one to write the port.
if is_mac:
actions.key("cmd-shift-alt-p")
else:
actions.key("ctrl-shift-alt-p")
# Wait for the VSCode instance to update the port file. This generally
# happens within the first millisecond, but we give it 3 seconds just in
# case.
start_time = time.monotonic()
new_contents = port_file_path.read_text()
sleep_time = 0.0005
while True:
if new_contents != original_contents:
try:
decoded_contents = json.loads(new_contents)
# If we're successful, we break out of the loop
break
except ValueError:
# If we're not successful, we keep waiting; we assume it was a
# partial write from VSCode
pass
time.sleep(sleep_time)
sleep_time *= 2
if time.monotonic() - start_time > 3.0:
raise Exception("Timed out waiting for VSCode to update port file")
new_contents = port_file_path.read_text()
port = decoded_contents["port"]
response = requests.post(
f"http://localhost:{port}/execute-command",
json={
"commandId": command,
"args": args,
"waitForFinish": wait_for_finish,
"expectResponse": expect_response,
},
timeout=(0.05, 3.05),
)
response.raise_for_status()
actions.sleep("25ms")
if expect_response:
return response.json() | 4fa3626f1371c0c03923f37136616fb7055ef9cf | 9,975 |
import threading
def run_with_timeout(proc, timeout, input=None):
"""
Run Popen process with given timeout. Kills the process if it does
not finish in time.
You need to set stdout and/or stderr to subprocess.PIPE in Popen, otherwise
the output will be None.
The returncode is 999 if the process was killed.
:returns: (returncode, stdout string, stderr string)
"""
output = []
def target():
output.extend(proc.communicate(input))
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
killed = False
thread.join(timeout)
if thread.is_alive():
proc.terminate()
killed = True
thread.join()
returncode = proc.returncode
if killed:
returncode = 999
return returncode, output[0], output[1] | 414e18dae8f31b20c472f7da14475f8da5761781 | 9,976 |
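# Usage sketch for run_with_timeout above: stdout/stderr must be PIPEs on the Popen object,
# and a 999 return code signals that the process was killed on timeout (POSIX `sleep` is
# used here purely for illustration).
import subprocess
proc = subprocess.Popen(["sleep", "10"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
returncode, out, err = run_with_timeout(proc, timeout=1)
print(returncode)   # 999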
from numpy import matrix
def dot(x, y, alpha=0):
"""
Compute alpha = xy + alpha, storing the incremental sum in alpha
x and y can be row and/or column vectors. If necessary, an
implicit transposition happens.
"""
    assert type(x) is matrix and len(x.shape) == 2, \
        "laff.dot: vector x must be a 2D numpy.matrix"
    assert type(y) is matrix and len(y.shape) == 2, \
        "laff.dot: vector y must be a 2D numpy.matrix"
    if(type(alpha) is matrix): m_alpha, n_alpha = alpha.shape
    assert isinstance(alpha,(int,float,complex)) or (m_alpha == 1 and n_alpha == 1), \
        "laff.dot: alpha must be a 1 x 1 matrix"
if(type(alpha) is matrix): alpha[0,0] = 0
else: alpha = 0
m_x, n_x = x.shape
m_y, n_y = y.shape
    assert m_x == 1 or n_x == 1, "laff.dot: x is not a vector"
    assert m_y == 1 or n_y == 1, "laff.dot: y is not a vector"
    if m_x == 1 and m_y == 1: # x is a row, y is a row
        assert n_x == n_y, "laff.dot: size mismatch between x and y"
        if(type(alpha) is matrix):
            for i in range(n_x): alpha[0,0] += y[0, i] * x[0, i]
        else:
            for i in range(n_x): alpha += y[0, i] * x[0, i]
    elif n_x == 1 and n_y == 1: # x is a column, y is a column
        assert m_x == m_y, "laff.dot: size mismatch between x and y"
        if(type(alpha) is matrix):
            for i in range(m_x): alpha[0,0] += y[i, 0] * x[i, 0]
        else:
            for i in range(m_x): alpha += y[i, 0] * x[i, 0]
    elif m_x == 1 and n_y == 1: # x is a row, y is a column
        assert n_x == m_y, "laff.dot: size mismatch between x and y"
        if(type(alpha) is matrix):
            for i in range(n_x): alpha[0,0] += y[i, 0] * x[0, i]
        else:
            for i in range(n_x): alpha += y[i, 0] * x[0, i]
    elif n_x == 1 and m_y == 1: # x is a column, y is a row
        assert m_x == n_y, "laff.dot: size mismatch between x and y"
        if(type(alpha) is matrix):
            for i in range(m_x): alpha[0,0] += y[0, i] * x[i, 0]
        else:
            for i in range(m_x): alpha += y[0, i] * x[i, 0]
return alpha | 2ef9fd4b02a586e9caff70b75bd598e925608171 | 9,977 |
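A small worked example of dot (added for illustration, not part of the laff source): a row vector against a column vector, which exercises the implicit-transposition branch.
from numpy import matrix

x = matrix([[1.0, 2.0, 3.0]])       # row vector
y = matrix([[4.0], [5.0], [6.0]])   # column vector
print(dot(x, y))                    # 1*4 + 2*5 + 3*6 = 32.0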
def load_train_test_data(
train_data_path, label_binarizer, test_data_path=None,
test_size=None, data_format="list"):
"""
train_data_path: path. path to JSONL data that contains text and tags fields
label_binarizer: MultiLabelBinarizer. multilabel binarizer instance used to transform tags
test_data_path: path, default None. path to test JSONL data similar to train_data
test_size: float, default None. if test_data_path not provided, dictates portion to be used as test
    data_format: str, default list. controls whether data are returned as lists or as generators for memory efficiency
"""
if data_format == "list":
if test_data_path:
X_train, Y_train, _ = load_data(train_data_path, label_binarizer)
X_test, Y_test, _ = load_data(test_data_path, label_binarizer)
else:
X, Y, _ = load_data(train_data_path, label_binarizer)
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, random_state=42, test_size=test_size
)
else:
if test_data_path:
X_train = partial(yield_texts, train_data_path)
Y_train = partial(yield_tags, train_data_path, label_binarizer)
X_test = partial(yield_texts, test_data_path)
Y_test = partial(yield_tags, test_data_path, label_binarizer)
else:
# need to split train / test and shuffle in memory efficient way
raise NotImplementedError
return X_train, X_test, Y_train, Y_test | 51aaf916f948b198e1f25c002655731008c173ed | 9,978 |
def get_token() -> str:
"""Obtains the Access Token from the Authorization Header"""
# Get the authorization header
authorization_header = request.headers.get("Authorization", None)
    # Raise an error if no Authorization header is found
if not authorization_header:
payload = {
"code": "authorization_header_missing",
"description": "Authorization header is expected",
}
raise AuthError(payload, 401)
authorization_header_parts = authorization_header.split()
# We are expecting the Authorization header to contain a Bearer token
if authorization_header_parts[0].lower() != "bearer":
payload = {
"code": "invalid_header",
"description": "Authorization header must be a Bearer token",
}
raise AuthError(payload, 401)
# The Authorization header is prefixed with Bearer, but does not contain the actual token
elif len(authorization_header_parts) == 1:
payload = {"code": "invalid_header", "description": "Token not found"}
raise AuthError(payload, 401)
# We only expect 2 parts, "Bearer" and the access token
elif len(authorization_header_parts) > 2:
payload = {
"code": "invalid_header",
"description": "Authorization header must be a valid Bearer token",
}
raise AuthError(payload, 401)
# If all checks out, we return the access token
return authorization_header_parts[1] | 5e1d05f705ad1c7505963e96c8637e5ab42aff79 | 9,980 |
from openfermion import QubitOperator
def convert_op_str(qubit_op_str, op_coeff):
    """
    Convert a qubit operator string (e.g. 'XIZ') into openfermion format
    """
    converted_Op = [f'{qOp_str}{qNo_index}' for qNo_index, qOp_str in enumerate(qubit_op_str) if qOp_str != 'I']
    separator = ' '  # space
    Openfermion_qubit_op = QubitOperator(separator.join(converted_Op), op_coeff)
return Openfermion_qubit_op | a6a512758a706b3a788f686331747ac9224c2f8b | 9,982 |
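A quick illustration of convert_op_str (not from the original source), assuming openfermion is installed; the Pauli string below is made up for the example.
op = convert_op_str('XIZ', 0.5)  # identity on qubit 1 is dropped
print(op)                        # 0.5 [X0 Z2]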
def __state_resolving_additional_facts(conversation, message, just_acknowledged):
"""
Bot is asking the user questions to resolve additional facts
:param conversation: The current conversation
:param message: The user's message
:param just_acknowledged: Whether or not an acknowledgement just happened.
Used to skip fact resolution and instead asks a question immediately.
    :return: A question to ask
"""
question = None
# Retrieve current_fact from conversation
current_fact = conversation.current_fact
if just_acknowledged:
question = Responses.fact_question(current_fact.name)
else:
# Extract entity from message based on current fact
fact_entity_value = __extract_entity(current_fact.name, current_fact.type, message)
if fact_entity_value is not None:
next_fact = fact_service.submit_resolved_fact(conversation, current_fact, fact_entity_value)
new_fact_id = next_fact['fact_id']
new_fact = None
if new_fact_id:
new_fact = db.session.query(Fact).get(new_fact_id)
conversation.current_fact = new_fact
# Additional facts remain to be asked
if fact_service.has_additional_facts(conversation):
# Additional fact limit reached, time for a new prediction
if fact_service.count_additional_facts_resolved(conversation) % MAX_ADDITIONAL_FACTS == 0:
conversation.bot_state = BotState.GIVING_PREDICTION
else:
question = Responses.fact_question(new_fact.name)
else:
# There are no more additional facts! Give a prediction
conversation.bot_state = BotState.GIVING_PREDICTION
return question | d1e75e0d67aa2b1bcc899885c83132a47df015dc | 9,983 |
import json
def load_dataset(path):
"""Load json file and store fields separately."""
with open(path) as f:
data = json.load(f)['data']
output = {'qids': [], 'questions': [], 'answers': [],
'contexts': [], 'qid2cid': []}
for article in data:
for paragraph in article['paragraphs']:
output['contexts'].append(paragraph['context'])
for qa in paragraph['qas']:
output['qids'].append(qa['id'])
output['questions'].append(qa['question'])
output['qid2cid'].append(len(output['contexts']) - 1)
if 'answers' in qa:
output['answers'].append(qa['answers'])
return output | 4ba01f49d6a0aa3329b076fc0de9dd38fb99f2f8 | 9,984 |
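Usage sketch for load_dataset (added for illustration); the file path is hypothetical and must point to a SQuAD-format JSON file.
dataset = load_dataset('train-v1.1.json')  # hypothetical path
print(len(dataset['questions']), 'questions over', len(dataset['contexts']), 'contexts')
print(dataset['questions'][0])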
def generate_rand_enex_by_prob_nb(shape: tp.Shape,
entry_prob: tp.MaybeArray[float],
exit_prob: tp.MaybeArray[float],
entry_wait: int,
exit_wait: int,
entry_pick_first: bool,
exit_pick_first: bool,
flex_2d: bool,
seed: tp.Optional[int] = None) -> tp.Tuple[tp.Array2d, tp.Array2d]:
"""Pick entries by probability `entry_prob` and exits by probability `exit_prob` one after another.
`entry_prob` and `exit_prob` should be 2-dim arrays of shape `shape`.
Specify `seed` to make output deterministic."""
if seed is not None:
np.random.seed(seed)
temp_idx_arr = np.empty((shape[0],), dtype=np.int_)
return generate_enex_nb(
shape,
entry_wait,
exit_wait,
entry_pick_first,
exit_pick_first,
rand_by_prob_choice_nb, (entry_prob, entry_pick_first, temp_idx_arr, flex_2d),
rand_by_prob_choice_nb, (exit_prob, exit_pick_first, temp_idx_arr, flex_2d)
) | fbb7fc4bcf50139f455049edd4af62e9c0429dd3 | 9,985 |
from datetime import datetime
import requests
from lxml import etree, html
from lxml.cssselect import CSSSelector
from textwrap import dedent
def retrieve(last_updated=datetime.now()):
""" Crawls news and returns a list of tweets to publish. """
print('Retrieving {} alzheimer news since {}.'.format(SITE, last_updated))
to_ret = list()
# Get all the content from the last page of the site's news
tree = html.fromstring(requests.get(URL).content)
# Get list of articles
articles = CSSSelector('article')(tree)
for article in articles:
# For each article parse the date on the metadata and compare to the last update of the bot.
# If the article is newer it should go on until it finds one that's not
link = CSSSelector('article .ue-c-cover-content__link')(article)[0].get('href')
if "promo" in link.lower() or "follow" in link.lower():
continue
news_page = html.fromstring(requests.get(link).content)
news_date = CSSSelector('time')(news_page)[0].get('datetime')
news_datetime = datetime.strptime(news_date, '%Y-%m-%dT%H:%M:%SZ')
if news_datetime < last_updated:
break
# Get the useful parts of each article to compose a tweet.
title = CSSSelector('article .ue-c-cover-content__headline')(article)[0].text
author = CSSSelector('.ue-c-article__byline-name a, .ue-c-article__byline-name')(news_page)[0].text
article_body = str(etree.tostring(CSSSelector('.ue-l-article__body')(news_page)[0]))
if "alzheimer" not in article_body.lower():
continue
# Compose a tweet with the article's information
tweet = """
{title}
Autor/a: {author}
Enlace: https:{link} ({site})
""".format(title=title, author=author, link=link, site=SITE)
to_ret.append(dedent(tweet))
# Returns a list of tweets ready for the bot to tweet.
return to_ret | ba194b84a50164ca8a238a77d0e80d5e80c93ae2 | 9,988 |
import binascii
import rsa
def check_seal(item):
"""
Given a message object, use the "seal" attribute - a cryptographic
signature to prove the provenance of the message - to check it is valid.
Returns a boolean indication of validity.
"""
try:
item_dict = to_dict(item)
raw_sig = item_dict['seal']
signature = binascii.unhexlify(raw_sig.encode('ascii'))
key = rsa.PublicKey.load_pkcs1(item_dict['sender'].encode('ascii'))
del item_dict['seal']
del item_dict['message']
root_hash = _get_hash(item_dict).hexdigest()
return rsa.verify(root_hash.encode('ascii'), signature, key)
    except Exception:
pass
return False | 86bb7b22d2efe4e7117b3c65fad2a7dc4853b428 | 9,989 |
import matplotlib.pyplot as plt
def plot_win_prob(times, diff, end_lim, probs, team_abr, bools):
""" This function plots the win probability and
score differential for the game
@param times (list): list containing actual_times
and times. times contains all of the times at
which win probability was calculated
@param diff (list): List of score differentials
corresponding to all times in actual_times
@param end_lim (int): Time at which the last win
probability value is calculated
@param probs (list): List of win probability
lists (probs_home and probs_away). probs_home
contains all of the home win probability
values for all times in the times list.
probs_away is the same, but for win probability
for the away team
        @param team_abr (list): List containing the
home team abbreviation in the first index
and the away team abbreviation in the
second index
@param bools (list): List of booleans controlling
which figures are plotted
Returns:
- fig (matplotlib.figure.Figure): Figure
containing score differential and/or
win probability. None if all of the
booleans are False
"""
actual_times, times = times
probs_home, probs_away = probs
plot_diff, plot_home, plot_away = bools
plt.rcParams["figure.figsize"] = (20,6)
# Score differential
if plot_diff:
fig, pltting = \
plot_score_differential(actual_times,
diff,
end_lim)
else:
fig,ax = plt.subplots()
pltting = ax
    # Quarter delineation
for normal_q in range(0,4):
pltting.plot([2880-normal_q*12*60, 2880-normal_q*12*60],
[0,1], 'gray')
    # OT delineation
for ot in range(0,10):
pltting.plot([-ot*5*60, -ot*5*60],
[0,1], 'gray')
# Win probability
if plot_home:
pltting.plot(times, probs_home, 'blue', label=team_abr[0])
if plot_away:
pltting.plot(times, probs_away, 'orange', label=team_abr[-1])
pltting.set_xlim(2880, end_lim)
pltting.set_ylim(0.0, 1.0)
pltting.set_title("Win Probability")
plt.legend(loc='best')
plt.show()
return fig | 139906b4a2db3cf3a7ffa531ec0701be0d395b13 | 9,990 |
def add_office():
"""Given that i am an admin i should be able to add a political office
When i visit ../api/v2/offices endpoint using POST method"""
if is_admin() is not True:
return is_admin()
errors = []
try:
if not request.get_json(): errors.append(
make_response(jsonify({'status': 409, "message": "missing input data"}), 409))
office_data = request.get_json()
check_missingfields = validate.missing_value_validator(['name', 'type'], office_data)
if check_missingfields is not True:
return check_missingfields
check_emptyfield = validate.empty_string_validator(['name', 'type'], office_data)
if check_emptyfield is not True:
return check_emptyfield
check_if_text_only = validate.text_arrayvalidator(['name', 'type'], office_data)
if check_if_text_only is not True:
return check_if_text_only
office_name = office_data['name']
office_type = office_data['type']
if len(errors) > 0:
for e in errors:
return e
res = office.add_office(office_name, office_type)
return res
except Exception as e:
return make_response(jsonify({'message': "something went wrong " + str(e.args[0]), 'status': 400}), 400) | ee990cb55ca819a1b4fdd2eed4346f7fca21a7c3 | 9,991 |
import typing as t
from email.utils import getaddresses
from pathlib import Path
from uuid import uuid4
import click
def send_message(
mg: mailgun.MailGun,
templates: t.Tuple[str, str],
contact_name: str,
contact_email: str,
sender: str,
reply_to: str,
sponsorship_package: t.Optional[Path],
dry_run: bool,
) -> bool:
"""
Send an individual email and report if it was successful
:param mg: the MailGun instance
:param templates: the text and html templates respectively
:param contact_name: the name of the contact at the company
:param contact_email: the email of the contact at the company
:param sender: the name of the person sending the email
:param reply_to: the email which replies are directed to
:param sponsorship_package: an optional file for the sponsorship package
:param dry_run: whether to actually send the email
:return: whether the sending was successful
"""
text, html = templates
# Format the sender email
sender_email = f"{sender[0]}{sender[sender.index(' ') + 1:].replace('-', '')}@{mg.domain}".lower()
# Get and format the contact email(s)
pairs = getaddresses([contact_email.replace(" ", "")])
emails = []
for _, email in pairs:
if email == "":
logger.error(f'invalid email address found in "{contact_email}"')
return False
emails.append(f"{contact_name} <{email.lower()}>")
# Print out the content on dry runs
if dry_run:
click.echo(
f"To: {', '.join(emails)}\n"
f"From: {sender} <{sender_email}>\n"
f"Subject: WaffleHacks Sponsorship Opportunity\n"
f"Reply To: {reply_to}\n\n\n"
f"{text}",
file=open(f"./dry-run-out/{contact_name} - {uuid4()}", "w"),
)
return True
try:
# Open the package if necessary
files = []
if sponsorship_package:
files.append(sponsorship_package.open("rb"))
mg.send(
from_=f"{sender} <{sender_email}>",
to=emails,
subject="WaffleHacks Sponsorship Opportunity",
text=text,
html=html,
files=files,
headers={"Reply-To": reply_to},
)
except mailgun.MailGunException as e:
logger.error(f"failed to send message: {e}")
return False
return True | 62d03de5fa7a3c579ff2351e2c4623b3bf0e8a8e | 9,992 |
import tensorflow as tf
def multi_class5_classification_dataset_sparse_labels() -> tf.data.Dataset:
"""
TensorFlow dataset instance with multi-class sparse labels (5 classes)
:return: Multi-class sparse (labels) classification dataset
"""
# Create features
X = tf.random.normal(shape=(100, 3))
    # Create sparse multi-class labels (integer class indices 0-4)
y = tf.random.uniform(minval=0, maxval=5, dtype=tf.int32, shape=(100,))
return tf.data.Dataset.from_tensor_slices((X, y)) | 05bd5f809e08fde21270c286351ed32b9ed2cb97 | 9,993 |
import numpy as np
def skipIfNAN(proteinPath):
""" Test if there is a NAN (not a number) in the lists """
overlapArrayWhole = None
overlapArrayInterface = None
overlapTApproxWhole = None
overlapTApproxInterface = None
try:
overlapArrayWhole = np.loadtxt(proteinPath+"overlapArrayWhole.txt")
except IOError:
pass
try:
overlapArrayInterface = np.loadtxt(proteinPath+"overlapArrayInterface.txt")
except IOError:
pass
try:
overlapTApproxWhole = np.loadtxt(proteinPath+"overlapTApproxWhole.txt")
except IOError:
pass
try:
overlapTApproxInterface = np.loadtxt(proteinPath+"overlapTApproxInterface.txt")
except IOError:
pass
if overlapArrayWhole is not None and np.isnan(overlapArrayWhole).any():
print "skipped"
return True
if overlapArrayInterface is not None and np.isnan(overlapArrayInterface).any():
print "skipped"
return True
if overlapTApproxWhole is not None and np.isnan(overlapTApproxWhole).any():
print "skipped"
return True
if overlapTApproxInterface is not None and np.isnan(overlapTApproxInterface).any():
print "skipped"
return True
return False | 0993fe55879e2c965b9856435e38f3a33d803e33 | 9,994 |
import json
def alignment_view(request, project_uid, alignment_group_uid):
"""View of a single AlignmentGroup.
"""
project = get_object_or_404(Project, owner=request.user.get_profile(),
uid=project_uid)
alignment_group = get_object_or_404(AlignmentGroup,
reference_genome__project=project, uid=alignment_group_uid)
# Initial javascript data.
init_js_data = json.dumps({
'project': adapt_model_instance_to_frontend(project),
'alignment_group': adapt_model_instance_to_frontend(alignment_group)
})
context = {
'project': project,
'tab_root': TAB_ROOT__DATA,
'alignment_group': alignment_group,
'experiment_sample_to_alignment_list_json': adapt_model_to_frontend(
ExperimentSampleToAlignment,
{'alignment_group': alignment_group}),
'init_js_data': init_js_data,
'flag_genome_finishing_enabled': int(settings.FLAG__GENOME_FINISHING_ENABLED)
}
return render(request, 'alignment.html', context) | 50f9420dca7c939524e1e243d667ddd76d7687d0 | 9,995 |
def get_matching_string(matches, inputText, limit=0.99):
"""Return the matching string with all of the license IDs matched with the input license text if none matches then it returns empty string.
Arguments:
        matches {dictionary} -- Contains the license IDs (which matched with the input text) with their respective Sorensen-Dice scores as values.
limit {float} -- limit at which we will consider the match as a perfect match.
inputText {string} -- license text input by the user.
Returns:
string -- matching string containing the license IDs that actually matched else returns empty string.
"""
if not matches:
matchingString = 'There is not enough confidence threshold for the text to match against the SPDX License database.'
return matchingString
elif 1.0 in matches.values() or all(limit < score for score in matches.values()):
matchingString = 'The following license ID(s) match: ' + ", ".join(matches.keys())
return matchingString
else:
for licenseID in matches:
listedLicense = getListedLicense(licenseID)
isTextStandard = checkTextStandardLicense(listedLicense, inputText)
if not isTextStandard:
matchingString = 'The following license ID(s) match: ' + licenseID
return matchingString
else:
return '' | be0fe152e530ec8244f892bfb4887b78bf89027b | 9,997 |
def get_review(annotation):
"""
Get annotation's review (if exists).
"""
try:
review = Comment.objects.get(annotation=annotation)
return review
except Comment.DoesNotExist:
return None | 89aeee2dc8811c57265ebbc30ede9cfafcd5e696 | 9,998 |
def load_grid(grdfiles, blocks, dimpart, nsigma, **kwargs):
"""Setup a `grid` by reading `grdfiles` on `blocks`
"""
ncgrid = nct.MDataset(grdfiles, blocks, dimpart, **kwargs)
# dummy time, to be updated later
time = ma.Marray(np.arange(10), dims=tdims)
lat = nct.readmarray(ncgrid, "lat_rho", hdims)
lon = nct.readmarray(ncgrid, "lon_rho", hdims)
if ncgrid.halow > 0:
halow = ncgrid.halow
# extend lon-lat on the halow=1 to infer correctly @ f-points
fill_halo(lat, halow)
fill_halo(lon, halow)
depth = nct.readmarray(ncgrid, "h", hdims)
angle = nct.readmarray(ncgrid, "angle", hdims)
mask = nct.readmarray(ncgrid, "mask_rho", hdims)
pm = nct.readmarray(ncgrid, "pm", hdims)
pn = nct.readmarray(ncgrid, "pn", hdims)
f = nct.readmarray(ncgrid, "f", hdims)
sigma = ma.Marray((np.arange(nsigma)+0.5)/nsigma, dims=vdims)
coords = {"t": time, "sigma": sigma, "eta": lat, "xi": lon}
return gr.Grid(coords, dims, depth=depth, angle=angle, mask=mask, pm=pm, pn=pn, f=f, **kwargs) | 08d102c4a1ef163e2af4801d7ffe2b572b747a58 | 9,999 |
import inspect
def with_patch_inspect(f):
"""decorator for monkeypatching inspect.findsource"""
def wrapped(*args, **kwargs):
save_findsource = inspect.findsource
save_getargs = inspect.getargs
inspect.findsource = findsource
inspect.getargs = getargs
try:
return f(*args, **kwargs)
finally:
inspect.findsource = save_findsource
inspect.getargs = save_getargs
return wrapped | 711fa3099b0c6242b623305237f950120b3de19a | 10,000 |
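Illustrative use of the decorator (added here as a sketch; the patched findsource/getargs replacements are defined elsewhere in the original module):
@with_patch_inspect
def get_source(obj):
    # inspect.findsource is temporarily replaced for the duration of this call
    return inspect.findsource(obj)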
import numpy as np
def apply_hux_f_model(r_initial, dr_vec, dp_vec, r0=30 * 695700, alpha=0.15, rh=50 * 695700, add_v_acc=True,
omega_rot=(2 * np.pi) / (25.38 * 86400)):
"""Apply 1d upwind model to the inviscid burgers equation.
r/phi grid. return and save all radial velocity slices.
:param r_initial: 1d array, initial condition (vr0). units = (km/sec).
:param dr_vec: 1d array, mesh spacing in r. units = (km)
:param dp_vec: 1d array, mesh spacing in p. units = (radians)
:param alpha: float, hyper parameter for acceleration (default = 0.15).
:param rh: float, hyper parameter for acceleration (default r=50*695700). units: (km)
:param r0: float, initial radial location. units = (km).
:param add_v_acc: bool, True will add acceleration boost.
:param omega_rot: differential rotation.
:return: velocity matrix dimensions (nr x np)
"""
v = np.zeros((len(dr_vec) + 1, len(dp_vec) + 1)) # initialize array vr.
v[0, :] = r_initial
if add_v_acc:
v_acc = alpha * (v[0, :] * (1 - np.exp(-r0 / rh)))
v[0, :] = v_acc + v[0, :]
for i in range(len(dr_vec)):
for j in range(len(dp_vec) + 1):
if j == len(dp_vec): # force periodicity
v[i + 1, j] = v[i + 1, 0]
else:
if (omega_rot * dr_vec[i]) / (dp_vec[j] * v[i, j]) > 1:
print(dr_vec[i] - dp_vec[j] * v[i, j] / omega_rot)
print(i, j) # courant condition
frac1 = (v[i, j + 1] - v[i, j]) / v[i, j]
frac2 = (omega_rot * dr_vec[i]) / dp_vec[j]
v[i + 1, j] = v[i, j] + frac1 * frac2
return v | 31fb582cc8d31702d8ac8aabb2dd099f169b0c08 | 10,001 |
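A minimal sketch of driving apply_hux_f_model on a uniform grid (added for illustration); the grid spacing and the 350 km/s inner boundary are illustrative values only.
import numpy as np

nr, nphi = 10, 128
r_initial = np.full(nphi, 350.0)                     # km/s at r0
dr_vec = np.full(nr - 1, 695700.0)                   # one solar radius per radial step, in km
dp_vec = np.full(nphi - 1, 2 * np.pi / (nphi - 1))   # radians
v_grid = apply_hux_f_model(r_initial, dr_vec, dp_vec)
print(v_grid.shape)  # (10, 128)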
import inspect
def requires_request_arg(method):
"""
Helper function to handle deprecation of old ActionMenuItem API where get_url, is_show,
get_context and render_html all accepted both 'request' and 'parent_context' as arguments
"""
try:
# see if this is a pre-2.15 get_url method that takes both request and context kwargs
inspect.signature(method).bind({})
except TypeError:
return True
else:
return False | 0ec09e34c04d4d54762051b01af8c80754d47125 | 10,002 |
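Hypothetical menu-item methods (not from the original source) showing how the helper distinguishes the two signatures:
class LegacyItem:
    def get_url(self, request, parent_context):
        return '/legacy/'

class ModernItem:
    def get_url(self, parent_context):
        return '/modern/'

print(requires_request_arg(LegacyItem().get_url))  # True  - binding a single argument fails
print(requires_request_arg(ModernItem().get_url))  # False - single-argument signature binds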
def show_output_to_df(
show_output: str,
spark_session: SparkSession,
default_data_type: str = 'string'
):
"""
Takes a string containing the output of a Spark DataFrame.show() call and
"rehydrates" it into a new Spark DataFrame instance. Example input:
+--------+--------+
|column_a|column_b|
+--------+--------+
|value 1a|value 1b|
|value 2a|value 2b|
+--------+--------+
Optionally, row delimiters can be omitted, and comment lines can be present
(whether or not row delimiters are provided):
|column_a|column_b|
|value 1a|value 1b|
# This is a comment that gets ignored.
|value 2a|value 2b|
Optionally, data types can be specified in a second header line, prefixed
with the DATA_TYPE_START_INDICATOR ("["):
+-------------+----------+------------+-------------------+-----------+
|string_column|int_column|float_column|timestamp_column |bool_column|
[string |int |float |timestamp |boolean ]
+-------------+----------+------------+-------------------+-----------+
|one |1 |1.1 |2018-01-01 00:00:00|true |
|two |2 |2.2 |2018-01-02 12:34:56|false |
+-------------+----------+------------+-------------------+-----------+
:param show_output: A string that resembles the output of a call to
DataFrame.show()
:param spark_session: A SparkSession used to create the new DataFrame instance
:param default_data_type: The default data type that will be used for all
columns for which the data type is not specified in the data type
declaration line
:return: A DataFrame containing the values represented in the input string
"""
if not show_output:
raise ValueError('show_output is required.')
rows = []
column_names = None
types = None
# Added a schema because createDataFrame() does introspection otherwise and
# sometimes gets it wrong with int/bigint and nulls.
schema = None
for line in show_output.strip().splitlines():
line = line.strip()
if not line.startswith(tuple(f'|{DATA_TYPE_START_INDICATOR}')):
continue
line_parts = line.split('|')[1:-1]
values = [part.strip() for part in line_parts]
if column_names is None:
column_names = values
continue
if line.startswith(DATA_TYPE_START_INDICATOR):
if types is None:
line = line.replace(DATA_TYPE_START_INDICATOR, '|', 1)\
.rstrip(f'{DATA_TYPE_END_INDICATOR}|') + '|'
types = [part.strip() for part in line.split('|')[1:-1]]
types = [data_type if len(data_type) > 0 else default_data_type
for data_type in types]
continue
else:
raise ValueError('Cannot have more than one data type declaration line.')
if types is None:
types = [default_data_type] * len(column_names)
_cast_types(values, types)
row_dict = dict(zip(column_names, values))
rows.append(Row(**row_dict))
if types is None:
# This can happen if data types are not specified and no data rows are
# provided.
types = [default_data_type] * len(column_names)
schema = _get_schema(column_names, types)
# Return a DataFrame with the columns in the original order:
return spark_session.createDataFrame(rows, schema=schema).select(column_names) | 0dd9372b29d191a846ac4a1e2251c118e4a01102 | 10,003 |
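A usage sketch for show_output_to_df, assuming an active SparkSession and the module's private helpers (_cast_types, _get_schema) being available alongside the function:
from pyspark.sql import SparkSession

spark = SparkSession.builder.master('local[1]').getOrCreate()
sample = '''
+--------+--------+
|column_a|column_b|
+--------+--------+
|value 1a|value 1b|
|value 2a|value 2b|
+--------+--------+
'''
df = show_output_to_df(sample, spark)
df.show()  # should round-trip the two rows with string columns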
import math
import numpy as np
def Schwefel(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Schwefel function."""
del seed
dim = len(arr)
bernoulli_arr = np.array([pow(-1, i + 1) for i in range(dim)])
x_opt = 4.2096874633 / 2.0 * bernoulli_arr
x_hat = 2.0 * (bernoulli_arr * arr) # Element-wise multiplication
z_hat = np.zeros([dim, 1])
z_hat[0, 0] = x_hat[0]
for i in range(1, dim):
z_hat[i, 0] = x_hat[i] + 0.25 * (x_hat[i - 1] - 2 * abs(x_opt[i - 1]))
x_opt.shape = (dim, 1)
z_vec = 100 * (
np.matmul(LambdaAlpha(10, dim), z_hat - 2 * abs(x_opt)) + 2 * abs(x_opt))
total = sum([z * math.sin(abs(z)**0.5) for z in z_vec.flat])
return -(total / (100.0 * dim)) + 4.189828872724339 + 100 * Fpen(z_vec / 100) | 1588dc5fa7864c3bd7ed5639ca44dafcd5d7f405 | 10,004 |
def article_idx_to_words_row(article_idx):
"""
Given a tuple with an article and an index, return a Row with the
index ad a list of the words in the article.
The words in the article are normalized, by removing all
non-'a-z|A-Z' characters.
    Any stop words (words of 2 characters or fewer) are ignored.
:param article_idx: tuple
:type article_idx: tuple(defoe.papers.article.Article, int)
:return: Row
:rtype: pyspark.sql.Row
"""
article, idx = article_idx
words = []
for word in article.words:
normalized_word = query_utils.normalize(word)
if len(word) > 2: # Anything less is a stop word
words.append(normalized_word)
return Row(idx=idx, words=words) | 8a956e6be7d0b3e3076219929b8e5e2358f856ab | 10,005 |
import tensorflow as tf
from tensorflow.keras.mixed_precision import experimental as mixed_precision
def get_device_strategy(device, half=False, XLA=False, verbose=True):
"""
    Returns the distributed strategy object, the tune policy and the number of replicas.
Parameters
----------
device : str
Possible values are "TPU", "GPU", "CPU"
verbose : bool
Whether to print the output messages or not
Returns
-------
tf.distribute.TPUStrategy
The distributed strategy object
int
The auto tune constant
int
Number of TPU cores, to adjust batch size and learning rate
tf.distribute.cluster_resolver.TPUClusterResolver
The tpu object
"""
device = device.upper()
v = tf.__version__
tpu = None
if device == "TPU":
_log("connecting to TPU...", verbose)
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
_log('Running on TPU ' + tpu.master(), verbose)
except ValueError:
_log("Could not connect to TPU", verbose)
tpu = None
if tpu:
try:
_log("initializing TPU ...", verbose)
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.TPUStrategy(tpu) if v >= '2.3.0' else tf.distribute.experimental.TPUStrategy(
tpu)
_log("TPU initialized", verbose)
if half:
policy = tf.keras.mixed_precision.experimental.Policy('mixed_bfloat16')
mixed_precision.set_policy(policy)
print('Mixed precision enabled')
if XLA:
tf.config.optimizer.set_jit(True)
print('Accelerated Linear Algebra enabled')
except:
_log("failed to initialize TPU", verbose)
device = "GPU"
else:
device = "GPU"
if device != "TPU":
_log("Using default strategy for CPU and single GPU", verbose)
strategy = tf.distribute.get_strategy()
if device == "GPU":
_log("Num GPUs Available: " + str(len(tf.config.experimental.list_physical_devices('GPU') if v < '2.1.0' else
tf.config.list_physical_devices('GPU'))), verbose)
tune = tf.data.experimental.AUTOTUNE
replicas = strategy.num_replicas_in_sync
_log(f'REPLICAS: {replicas}', verbose)
return strategy, tune, replicas, tpu | c0c5d29490876812d3a3724638a17ebb0abdd54e | 10,006 |
def make_mask(
pois_gdf,
link_gdf,
):
"""
:param pois_gdf:
:param link_gdf:
:return:
"""
mask = np.array([])
enum = np.array([])
return mask, enum | bd31fe0c0c9f1f1f38d1c4e1bf26bdeb3f2806ca | 10,007 |
def SEMIMINUS(r1, r2):
"""aka NOT MATCHING
(macro)"""
return MINUS(r1, SEMIJOIN(r1, r2)) | 225e3385b03420a52fb11703ee58a251ff2bacd6 | 10,008 |
import numpy as np
def Ineg_wrapper(valS, valI):
"""
Function used to wrap Inequalities into a suitable form for optimisation
valS > valI --> Inequality is satisfied
valS and valI can be float or 1d array
"""
epsilon = 1e-6
top = 1e3
ecart = valI - valS
if ecart < epsilon:
out = np.exp(ecart) * epsilon / np.exp(epsilon)
elif ecart > top:
out = np.log(ecart) * top / np.log(top)
else:
out = ecart
return out | 1bf1f664845de8cc13750d6d021c1058687d91cc | 10,009 |
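Two scalar calls showing the two regimes of Ineg_wrapper (added for illustration):
print(Ineg_wrapper(3.0, 1.0))  # satisfied (valS > valI): output squashed to ~1.4e-7
print(Ineg_wrapper(1.0, 3.0))  # violated: the gap of 2.0 is passed through unchanged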
import cv2
import numpy as np
import tensorflow as tf
def preprocess_imgs(set_name, img_size):
"""
Resize and apply VGG-15 preprocessing
"""
set_new = []
for img in set_name:
img = cv2.resize(
img,
dsize=img_size,
interpolation=cv2.INTER_CUBIC
)
set_new.append(tf.keras.applications.vgg16.preprocess_input(img))
return np.array(set_new) | 52f1b677a053feac585b57847aab32c8d38c5b30 | 10,010 |
import galsim
def get_shapes(galsim_img, center):
""" Get shapes
This function compute the moments of an image. Then return the sigma of the
window function used (size of the object) and the amplitude
(flux of the object).
Parameters
---------
galsim_img : galsim.image.Image
Galsim.image object containing the image.
center : tuple
Center of the object (x, y).
Returns
-------
sigma : float
Sigma of the window function, or -1 if an error occured.
amp : float
Moments amplitude, or -1 if an error occured.
"""
shapes = galsim.hsm.FindAdaptiveMom(galsim_img,
guess_centroid=galsim.PositionD(center),
strict=False)
if shapes.error_message == '':
return shapes.moments_sigma, shapes.moments_amp
else:
return -1, -1 | 3d6520d129c0c6bea93f91e332b477b777041a0b | 10,011 |
def ascending_super_operator(hamAB, hamBA, w_isometry, v_isometry, unitary,
refsym):
"""
ascending super operator for a modified binary MERA
ascends 'hamAB' and 'hamBA' up one layer
Args:
hamAB (tf.Tensor): local Hamiltonian on the A-B lattice
hamBA (tf.Tensor): local Hamiltonian on the B-A lattice
w_isometry (tf.Tensor): MERA isometry
v_isometry (tf.Tensor): MERA isometry
        unitary (tf.Tensor): MERA disentangler
refsym (bool): if true, enforce reflection symmetry
Returns:
hamABout (tf.Tensor): ascended Hamiltonian on A-B lattice
hamBAout (tf.Tensor): ascended Hamiltonian on B-A lattice
"""
indList1 = [[6, 4, 1, 2], [1, 3, -3], [6, 7, -1], [2, 5, 3, 9],
[4, 5, 7, 10], [8, 9, -4], [8, 10, -2]]
indList2 = [[3, 4, 1, 2], [5, 6, -3], [5, 7, -1], [1, 2, 6, 9],
[3, 4, 7, 10], [8, 9, -4], [8, 10, -2]]
indList3 = [[5, 7, 2, 1], [8, 9, -3], [8, 10, -1], [4, 2, 9, 3],
[4, 5, 10, 6], [1, 3, -4], [7, 6, -2]]
indList4 = [[3, 6, 2, 5], [2, 1, -3], [3, 1, -1], [5, 4, -4], [6, 4, -2]]
hamBAout = tn.ncon([
hamAB, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList1)
if refsym:
hamBAout = hamBAout + tf.transpose(hamBAout, (1, 0, 3, 2))
else:
hamBAout = hamBAout + tn.ncon([
hamAB, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList3)
hamBAout = hamBAout + tn.ncon([
hamBA, w_isometry,
tf.conj(w_isometry), unitary,
tf.conj(unitary), v_isometry,
tf.conj(v_isometry)
], indList2)
hamABout = tn.ncon([
hamBA, v_isometry,
tf.conj(v_isometry), w_isometry,
tf.conj(w_isometry)
], indList4)
return hamABout, hamBAout | 8692d2c0d02e82cb691c24977091665015aecdc6 | 10,012 |
def filteredhash(repo, maxrev):
"""build hash of filtered revisions in the current repoview.
Multiple caches perform up-to-date validation by checking that the
tiprev and tipnode stored in the cache file match the current repository.
However, this is not sufficient for validating repoviews because the set
of revisions in the view may change without the repository tiprev and
tipnode changing.
This function hashes all the revs filtered from the view and returns
that SHA-1 digest.
"""
cl = repo.changelog
if not cl.filteredrevs:
return None
key = cl._filteredrevs_hashcache.get(maxrev)
if not key:
revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
if revs:
s = hashutil.sha1()
for rev in revs:
s.update(b'%d;' % rev)
key = s.digest()
cl._filteredrevs_hashcache[maxrev] = key
return key | de606e22c499eb53d6d83f68900d234e76498e35 | 10,013 |
import logging
def filter_blast_by_amplicon(blast_hits, min_amplicon_len, max_amplicon_len):
"""
    Filter primers by the putative amplicon that would be generated.
    If the amplicon size is outside of the min/max range, the primer hits are not legitimate off-targets.
"""
logging.info('Filtering to only hits producing a legitimate amplicon...')
nonspec_primers = set()
for primer_id,d in blast_hits.items():
status = {'no_amp' : 0, 'hit' : 0, 'wrong_strand' : 0}
for saccver,dd in d.items():
if primer_id in nonspec_primers:
break
# hits for primer pair?
try:
_,_ = dd['f'],dd['r']
except KeyError:
status['no_amp'] += 1
continue
# calc amplicon size of any expanded fwd-rev pair
for x in dd['f']:
if primer_id in nonspec_primers:
break
for y in dd['r']:
amp_len = calc_amp_len(x[0], x[1], y[0], y[1])
if (x[2] != y[2] and amp_len >= min_amplicon_len
and amp_len <= max_amplicon_len):
# legit hit: different strand & amplicon_len w/in size range
nonspec_primers.add(primer_id)
status['hit'] += 1
break
elif (x[2] == y[2] and amp_len >= min_amplicon_len
and amp_len <= max_amplicon_len):
# same strand, but correct amplicon size
status['wrong_strand'] += 1
# summary
msg = ' Primer {}: legit amplicon: {}, no amplicon: {}'
logging.info(msg.format(primer_id, status['hit'], status['no_amp']))
# summary
msg = ' No. of primers producing a legit non-target amplicon: {}'
logging.info(msg.format(len(nonspec_primers)))
return nonspec_primers | 8f84a5d615f65e7c21d5135d3f585b91c0f4667b | 10,014 |
def determine_channel(channel_as_text):
"""Determine which channel the review is for according to the channel
parameter as text, and whether we should be in content-review only mode."""
if channel_as_text == 'content':
# 'content' is not a real channel, just a different review mode for
# listed add-ons.
content_review = True
channel = 'listed'
else:
content_review = False
# channel is passed in as text, but we want the constant.
channel = amo.CHANNEL_CHOICES_LOOKUP.get(
channel_as_text, amo.RELEASE_CHANNEL_LISTED)
return channel, content_review | db8eeaae3c953cf497135f4d6e6071275a626dc2 | 10,016 |
import requests
def get_device_config(device_name, dnac_jwt_token):
"""
This function will get the configuration file for the device with the name {device_name}
:param device_name: device hostname
:param dnac_jwt_token: DNA C token
:return: configuration file
"""
device_id = get_device_id_name(device_name, dnac_jwt_token)
url = DNAC_URL + '/api/v1/network-device/' + device_id + '/config'
header = {'content-type': 'application/json', 'x-auth-token': dnac_jwt_token}
response = requests.get(url, headers=header, verify=False)
config_json = response.json()
config_file = config_json['response']
return config_file | b092efbe307f3f7a73cc998275ad67ea064cd3ed | 10,018 |
def get_ideas():
"""
Gets all ideas from mongo
"""
return find('ideas') | e6c8a152c2bca775e17d6fa52b262b334ac693c0 | 10,019 |
import numpy
def uppercase_dtype(dtype):
""" Convert a dtype to upper case. A helper function.
Do not use.
"""
pairs = dict([(key.upper(), dtype.fields[key]) for key in dtype.names])
dtype = numpy.dtype(pairs)
return dtype | bf28581dbb6a857a12c1b056a5e1b6f7bdbbbc27 | 10,020 |
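Example with a small structured dtype (added for illustration):
import numpy

dt = numpy.dtype([('name', 'U10'), ('value', 'f8')])
print(uppercase_dtype(dt).names)  # ('NAME', 'VALUE')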
from typing import Dict
def mlp_prior(input_dim: int, zdim: int = 2) -> Dict[str, jnp.array]:
"""Priors over weights and biases in the default Bayesian MLP"""
hdim = [64, 32]
def _bnn_prior(task_dim: int):
w1 = sample_weights("w1", input_dim, hdim[0], task_dim)
b1 = sample_biases("b1", hdim[0], task_dim)
w2 = sample_weights("w2", hdim[0], hdim[1], task_dim)
b2 = sample_biases("b2", hdim[1], task_dim)
w3 = sample_weights("w3", hdim[1], zdim, task_dim)
b3 = sample_biases("b3", zdim, task_dim)
return {"w1": w1, "b1": b1, "w2": w2, "b2": b2, "w3": w3, "b3": b3}
return _bnn_prior | 29c1d751f09a8da0c9f68209a5bcd48db12e1ca1 | 10,022 |
import ccxt
def get_biggest_spread_by_symbol(exchanges, symbol):
"""Get biggest spread by symbol."""
ask_exchange_id = ""
min_ask_price = 99999999
bid_exchange_id = ""
max_bid_price = 0
for exchange_id in exchanges:
exchange = eval("ccxt.{0}()".format(exchange_id))
try:
order_book = exchange.fetch_order_book(symbol)
bid_price = (
order_book["bids"][0][0] if len(order_book["bids"]) > 0 else None
)
ask_price = (
order_book["asks"][0][0] if len(order_book["asks"]) > 0 else None
)
if ask_price < min_ask_price:
ask_exchange_id = exchange_id
min_ask_price = ask_price
if bid_price > max_bid_price:
bid_exchange_id = exchange_id
max_bid_price = bid_price
increase_percentage = (bid_price - ask_price) / ask_price * 100
if increase_percentage >= 1:
return ask_exchange_id, min_ask_price, bid_exchange_id, max_bid_price
except Exception as e:
# pass
print(e)
print("{0} - There is an error!".format(exchange_id))
min_ask_price += 0.235
max_bid_price -= 0.235
return ask_exchange_id, min_ask_price, bid_exchange_id, max_bid_price | 20eda8274e513d1e098c34c309833c58be6dbb4e | 10,023 |
def update_user():
"""User update route
:return: action status
"""
if 'data' in request.json:
data = request.json['data']
if ('profile' in data) and ('theme' in data['profile']):
current_user.profile.theme = data['profile']['theme']
services.db.session.commit()
return jsonify({
'status': 'success',
'message': 'User profile updated successfully.'
}) | f6b98a0e06f7b898737ffa0e6c395f2ddd18fc7b | 10,025 |
from typing import List
def on_deck(elements: List[int], all_vars):
"""all of the elements must be within the deck"""
rules = []
for element in elements:
var = all_vars[element - 1]
rules.append(var >= 1)
rules.append(var <= 52)
return rules | 2e90dfa45bd90a7c3b834000e070631af5952f36 | 10,026 |
def transform_data_to_dictionary(elements):
"""Parses each element in the list and parses it in a dictionary
Args:
elements (list): list of html elements
Returns:
dictionary: treated information.
"""
url_informations = {}
for n in range(0, len(elements), 2):
url_informations[clean_names(elements[n].text)] = elements[n+1]
return url_informations | fd81fe7b6093577f32e460cb8a4d22cbbec92789 | 10,027 |
import math
import torch
import torch.nn.functional as F
def postprocess_new(u, x, lr_min, lr_max, num_itr, rho=0.0, with_l1=False,s=math.log(9.0)):
"""
:param u: utility matrix, u is assumed to be symmetric, in batch
:param x: RNA sequence, in batch
:param lr_min: learning rate for minimization step
:param lr_max: learning rate for maximization step (for lagrangian multiplier)
:param num_itr: number of iterations
:param rho: sparsity coefficient
:param with_l1:
:return:
"""
m = constraint_matrix_batch(x).float()
# u with threshold
# equivalent to sigmoid(u) > 0.9
# u = (u > math.log(9.0)).type(torch.FloatTensor) * u
u = soft_sign(u - s) * u
# initialization
a_hat = (torch.sigmoid(u)) * soft_sign(u - s).detach()
lmbd = F.relu(torch.sum(contact_a(a_hat, m), dim=-1) - 1).detach()
# gradient descent
for t in range(num_itr):
grad_a = (lmbd * soft_sign(torch.sum(contact_a(a_hat, m), dim=-1) - 1)).unsqueeze_(-1).expand(u.shape) - u / 2
grad = a_hat * m * (grad_a + torch.transpose(grad_a, -1, -2))
a_hat -= lr_min * grad
lr_min = lr_min * 0.99
if with_l1:
a_hat = F.relu(torch.abs(a_hat) - rho * lr_min)
lmbd_grad = F.relu(torch.sum(contact_a(a_hat, m), dim=-1) - 1)
lmbd += lr_max * lmbd_grad
lr_max = lr_max * 0.99
# print
# if t % 20 == 19:
# n1 = torch.norm(lmbd_grad)
# grad_a = (lmbd * soft_sign(torch.sum(contact_a(a_hat, m), dim=-1) - 1)).unsqueeze_(-1).expand(u.shape) - u / 2
# grad = a_hat * m * (grad_a + torch.transpose(grad_a, -1, -2))
# n2 = torch.norm(grad)
# print([t, 'norms', n1, n2, aug_lagrangian(u, m, a_hat, lmbd), torch.sum(contact_a(a_hat, u))])
a = a_hat * a_hat
a = (a + torch.transpose(a, -1, -2)) / 2
a = a * m
return a | 51fb589a2a8ccaeb96b06192f6050ded91f81f07 | 10,028 |
def parse_api_error(response):
"""
Parse the error-message from the API Response.
Assumes, that a check if there is an error present was done beforehand.
:param response: Dict of the request response ([imdata][0][....])
:type response: ``dict``
:returns: Parsed Error-Text
:rtype: ``str``
"""
if "error" in response["imdata"][0]:
return (
"API-Errorcode "
+ str(response["imdata"][0]["error"]["attributes"]["code"])
+ ": "
+ str(response["imdata"][0]["error"]["attributes"]["text"])
)
else:
return "Unparseable: " + str(response) | acc4256b3245e3e2c10e3ba998bf577e0f51a33e | 10,029 |
from typing import Union
def login_manual_user_device(username: str, password: str, mac_address: str) -> Union[str, Token]:
"""Try to login by username and password. A token for auto-login is returned"""
possible_user = User.get_by_username(username)
if possible_user is None:
fail_msg = f"No user with username: {username}."
else:
user = possible_user
if not pwd_context.verify(password, user.password):
fail_msg = f"Wrong password"
else:
token, device_id = _add_update_device(user.id, mac_address)
_set_user_authenticated(user.id, device_id)
client_logger_security().info(f"Successfully logged in manual: device_id={device_id}, user_id={user.user_id}, "
f"token={token}")
return token
client_logger_security().info(f"Failed to login manual: {fail_msg}")
return "Wrong username or password" | 5dd6e1043ffea2cceacf1fc83e9713b4b0fd827b | 10,030 |
import numpy as np
from numpy import angle, conj, exp, linalg, r_, Inf
from scipy.sparse import hstack, vstack
from scipy.sparse.linalg import spsolve
def corrector_new(Ybus, Ibus, Sbus, V0, pv, pq, lam0, Sxfr, Vprv, lamprv, z, step, parametrization, tol, max_it,
verbose, max_it_internal=10):
"""
Solves the corrector step of a continuation power flow using a full Newton method
with selected parametrization scheme.
solves for bus voltages and lambda given the full system admittance
matrix (for all buses), the complex bus power injection vector (for
all buses), the initial vector of complex bus voltages, and column
vectors with the lists of bus indices for the swing bus, PV buses, and
PQ buses, respectively. The bus voltage vector contains the set point
for generator (including ref bus) buses, and the reference angle of the
swing bus, as well as an initial guess for remaining magnitudes and
angles.
Uses default options if this parameter is not given. Returns the
final complex voltages, a flag which indicates whether it converged or not,
the number of iterations performed, and the final lambda.
:param Ybus: Admittance matrix (CSC sparse)
:param Ibus: Bus current injections
:param Sbus: Bus power injections
:param V0: Bus initial voltages
:param pv: list of pv nodes
:param pq: list of pq nodes
:param lam0: initial value of lambda (loading parameter)
:param Sxfr: [delP+j*delQ] transfer/loading vector for all buses
:param Vprv: final complex V corrector solution from previous continuation step
:param lamprv: final lambda corrector solution from previous continuation step
:param z: normalized predictor for all buses
:param step: continuation step size
:param parametrization:
:param tol:
:param max_it:
:param verbose:
:return: V, CONVERGED, I, LAM
"""
"""
# CPF_CORRECTOR Solves the corrector step of a continuation power flow using a
# full Newton method with selected parametrization scheme.
# [V, CONVERGED, I, LAM] = CPF_CORRECTOR(YBUS, SBUS, V0, REF, PV, PQ, ...
# LAM0, SXFR, VPRV, LPRV, Z, STEP, parametrization, MPOPT)
# solves for bus voltages and lambda given the full system admittance
# matrix (for all buses), the complex bus power injection vector (for
# all buses), the initial vector of complex bus voltages, and column
# vectors with the lists of bus indices for the swing bus, PV buses, and
# PQ buses, respectively. The bus voltage vector contains the set point
# for generator (including ref bus) buses, and the reference angle of the
# swing bus, as well as an initial guess for remaining magnitudes and
# angles. MPOPT is a MATPOWER options struct which can be used to
# set the termination tolerance, maximum number of iterations, and
# output options (see MPOPTION for details). Uses default options if
# this parameter is not given. Returns the final complex voltages, a
# flag which indicates whether it converged or not, the number
# of iterations performed, and the final lambda.
#
# The extra continuation inputs are LAM0 (initial predicted lambda),
# SXFR ([delP+j*delQ] transfer/loading vector for all buses), VPRV
# (final complex V corrector solution from previous continuation step),
# LAMPRV (final lambda corrector solution from previous continuation step),
# Z (normalized predictor for all buses), and STEP (continuation step size).
# The extra continuation output is LAM (final corrector lambda).
#
# See also RUNCPF.
# MATPOWER
# Copyright (c) 1996-2015 by Power System Engineering Research Center (PSERC)
# by Ray Zimmerman, PSERC Cornell,
# Shrirang Abhyankar, Argonne National Laboratory,
# and Alexander Flueck, IIT
#
# Modified by Alexander J. Flueck, Illinois Institute of Technology
# 2001.02.22 - corrector.m (ver 1.0) based on newtonpf.m (MATPOWER 2.0)
#
# Modified by Shrirang Abhyankar, Argonne National Laboratory
# (Updated to be compatible with MATPOWER version 4.1)
#
# $Id: cpf_corrector.m 2644 2015-03-11 19:34:22Z ray $
#
# This file is part of MATPOWER.
# Covered by the 3-clause BSD License (see LICENSE file for details).
# See http://www.pserc.cornell.edu/matpower/ for more info.
"""
# initialize
converged = False
i = 0
V = V0
Va = angle(V)
Vm = np.abs(V)
dVa = np.zeros_like(Va)
dVm = np.zeros_like(Vm)
lam = lam0 # set lam to initial lam0
# set up indexing for updating V
npv = len(pv)
npq = len(pq)
pvpq = r_[pv, pq]
nj = npv + npq * 2
nb = len(V) # number of buses
j1 = 1
'''
# MATLAB code
j2 = npv # j1:j2 - V angle of pv buses
j3 = j2 + 1
j4 = j2 + npq # j3:j4 - V angle of pq buses
j5 = j4 + 1
j6 = j4 + npq # j5:j6 - V mag of pq buses
j7 = j6 + 1
j8 = j6 + 1 # j7:j8 - lambda
'''
# j1:j2 - V angle of pv buses
j1 = 0
j2 = npv
# j3:j4 - V angle of pq buses
j3 = j2
j4 = j2 + npq
# j5:j6 - V mag of pq buses
j5 = j4
j6 = j4 + npq
j7 = j6
j8 = j6 + 1
# evaluate F(x0, lam0), including Sxfr transfer/loading
mismatch = V * conj(Ybus * V) - Sbus - lam * Sxfr
# F = r_[mismatch[pvpq].real, mismatch[pq].imag]
# evaluate P(x0, lambda0)
P = cpf_p(parametrization, step, z, V, lam, Vprv, lamprv, pv, pq, pvpq)
# augment F(x,lambda) with P(x,lambda)
F = r_[mismatch[pvpq].real, mismatch[pq].imag, P]
# check tolerance
last_error = linalg.norm(F, Inf)
error = 1e20
if last_error < tol:
converged = True
if verbose:
print('\nConverged!\n')
# do Newton iterations
while not converged and i < max_it:
# update iteration counter
i += 1
# evaluate Jacobian
J = Jacobian(Ybus, V, Ibus, pq, pvpq)
dF_dlam = -r_[Sxfr[pvpq].real, Sxfr[pq].imag]
dP_dV, dP_dlam = cpf_p_jac(parametrization, z, V, lam, Vprv, lamprv, pv, pq, pvpq)
# augment J with real/imag - Sxfr and z^T
'''
J = [ J dF_dlam
dP_dV dP_dlam ]
'''
J = vstack([hstack([J, dF_dlam.reshape(nj, 1)]),
hstack([dP_dV, dP_dlam])], format="csc")
# compute update step
dx = -spsolve(J, F)
# reassign the solution vector
if npv:
dVa[pv] = dx[j1:j2]
if npq:
dVa[pq] = dx[j3:j4]
dVm[pq] = dx[j5:j6]
# update lambda
lam += dx[j7:j8][0]
# reset mu
mu_ = 1.0
print('iter', i)
it = 0
Vm = np.abs(V)
Va = np.angle(V)
while error >= last_error and it < max_it_internal:
# update voltage the Newton way (mu=1)
Vm_new = Vm + mu_ * dVm
Va_new = Va + mu_ * dVa
V_new = Vm_new * exp(1j * Va_new)
print('\t', mu_, error, last_error)
# evaluate F(x, lam)
mismatch = V_new * conj(Ybus * V_new) - Sbus - lam * Sxfr
# evaluate P(x, lambda)
P = cpf_p(parametrization, step, z, V_new, lam, Vprv, lamprv, pv, pq, pvpq)
# compose the mismatch vector
F = r_[mismatch[pv].real,
mismatch[pq].real,
mismatch[pq].imag,
P]
# check for convergence
error = linalg.norm(F, Inf)
# modify mu
mu_ *= 0.25
it += 1
V = V_new.copy()
last_error = error
if verbose:
            print('iter {0:3d}, error {1:10.3e}'.format(i, error))
if error < tol:
converged = True
if verbose:
                print("\nNewton's method corrector converged in ", i, ' iterations.\n')
if verbose:
if not converged:
print('\nNewton method corrector did not converge in ', i, ' iterations.\n')
return V, converged, i, lam, error | e4ff6d31916c34768152af998c1bc5ff4fdebcb7 | 10,031 |