content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
from flask import jsonify
def first(id_):
"""The first service of the station."""
return jsonify(
[
(n.removeprefix("_"), t)
for n, t in r.zrange(f"Station:_{id_}:first", 0, -1, withscores=True)
]
) | 5de7ef49c1deb0edea3f3d4ddcc30da8c4ae2d3a | 9,900 |
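A minimal usage sketch for the snippet above, assuming a local Redis instance and the key layout the function implies (members prefixed with "_", scored by departure time); in the original, `r` is presumed to be a module-level redis client and `jsonify` to come from Flask.

import redis

r = redis.Redis(decode_responses=True)
r.zadd("Station:_42:first", {"_lineA": 360.0, "_lineB": 415.0})

entries = [
    (name.removeprefix("_"), score)
    for name, score in r.zrange("Station:_42:first", 0, -1, withscores=True)
]
print(entries)  # [('lineA', 360.0), ('lineB', 415.0)]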
import numpy as np
def sample(df, n, shape):
"""
randomly sample patch images from DataFrame
Parameters
----------
df : pd.DataFrame
DataFrame containing name of image files
n : int
number of patches to extract
shape : list
shape of patches to extract
Returns
-------
images : (n, n_channels, shape[0], shape[1], ...) ndarray
input patches
labels : (n, shape[0], shape[1], ...) ndarray
label patches
"""
N = len(df)
assert N >= n, "n should be smaller than or equal to " + str(N)
indices = np.random.choice(N, n, replace=False)
image_files = df["image"][indices]
label_files = df["label"][indices]
images = []
labels = []
for image_file, label_file in zip(image_files, label_files):
image = load_nifti(image_file)
label = load_nifti(label_file).astype(np.int32)
mask = np.int32(label > 0)
        # restrict sampling to voxels at least half a patch length away from the border
        slices = tuple(slice(len_ // 2, -len_ // 2) for len_ in shape)
        mask[slices] *= 2
        indices = np.where(mask > 1.5)
        i = np.random.choice(len(indices[0]))
        slices = tuple(
            slice(index[i] - len_ // 2, index[i] + len_ // 2)
            for index, len_ in zip(indices, shape))
        image_patch = image[slices]
        label_patch = label[slices]
image_patch = image_patch.transpose(3, 0, 1, 2)
images.append(image_patch)
labels.append(label_patch)
images = np.array(images)
labels = np.array(labels)
return images, labels | a0f655ad71a7ef47c71ecc23f45365f721bc2085 | 9,901 |
def bitsizeof_varint32(value: int) -> int:
"""
Gets bit size of variable 32-bit signed integer value.
:param value: Value to use for bit size calculation.
:returns: Bit size of the value.
:raises PythonRuntimeException: Throws if given value is out of range for varint32 type.
"""
return _bitsizeof_varnum(abs(value), VARINT32_MAX_VALUES, "varint32") | 16fd942d23b6ec26a300119e56fbbad6f37a203d | 9,902 |
import requests
import json
def slack(channel, message, subject=''):
"""
Sends a notification to meerkat slack server. Channel is '#deploy' only if
in live deployment, otherwise sent privately to the developer via slackbot.
Args:
channel (str): Required. The channel or username to which the message
should be posted.
message (str): Required. The message to post to slack.
        subject (str): Optional. Placed in bold and separated by a pipe.
    Returns:
        requests.Response: The response returned by the Slack webhook.
    """
# Assemble the message text string
text = str(message)
if subject:
text = "*_{}_* | {}".format(subject, message)
# Send the slack message
message = {'text': text, 'channel': channel, 'username': 'Meerkat'}
url = ('https://hooks.slack.com/services/T050E3XPP/'
'B0G7UKUCA/EtXIFB3CRGyey2L7x5WbT32B')
headers = {'Content-Type': 'application/json'}
r = requests.post(url, data=json.dumps(message), headers=headers)
# Return the slack response
return r | 274111901fd3b7545c8e2128ce3e98716eca8406 | 9,903 |
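A hypothetical call of the helper above; the channel and message are placeholders, and the webhook URL hard-coded in the function is taken as-is from the snippet.

response = slack('#deploy', 'Deployment finished without errors.', subject='meerkat_api')
print(response.status_code)  # 200 if the webhook accepted the payload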
import cv2
from scipy.spatial import distance
def compute_similarity(img_one, img_two):
"""Performs image resizing just to compute the
cosine similarity faster
Input:
Two images
Output:
Cosine Similarity
"""
x = cv2.resize(img_one, dsize=(112, 112), interpolation=cv2.INTER_CUBIC)
y = cv2.resize(img_two, dsize=(112, 112), interpolation=cv2.INTER_CUBIC)
    # flatten to 1-D vectors as expected by scipy.spatial.distance.cosine
    x = x.ravel()
    y = y.ravel()
if x.shape[0] != y.shape[0]:
dist = 0
else:
dist = 1 - distance.cosine(x, y)
return dist | 24d02db8c685480572fae2ab02f9648e3798fdf3 | 9,904 |
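A quick sanity check with synthetic data (requires opencv-python and scipy); because cosine similarity is scale-invariant, a uniformly dimmed copy of an image scores close to 1.

import numpy as np

a = np.random.rand(64, 64).astype(np.float32)
b = 0.5 * a                      # same content, half the intensity
print(compute_similarity(a, b))  # ~1.0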
def get_results_from_firebase(firebase):
"""
The function to download all results from firebase
Parameters
----------
firebase : pyrebase firebase object
initialized firebase app with admin authentication
Returns
-------
results : dict
The results in a dictionary with the following format:
{
"task_id" {
"user1_id": {
"data": {...}
},
"user2_id": {
"data": {...}
},
}
}
"""
fb_db = firebase.database()
results = fb_db.child("results").get().val()
return results | ca06c24367d778d4b601eab6fa31009fe6ecb372 | 9,905 |
import numpy as np
def mtci_vi(imgData, wave, mask=0, bands=[-1,-1,-1]):
"""
Function that calculates the MERIS Terrestrial Chlorophyll Index.
    This function uses wavelengths 753.75, 708.75, and 681.25 nm. The closest bands to these values will be used.
Citation: Dash, J. and Curran, P.J. 2004. The MERIS terrestrial chlorophyll index, International Journal of Remote Sensing, 25(23), 5403–5413.
INPUTS:
1) imgData: an array of hyperspectral data either as 3D [n_row x n_col x n_band] or 2D [n_row x n_band]
2) wave: an array of wavelengths in nanometers that correspond to the n_bands in imgData
3) mask: OPTIONAL - a binary array (same size as imgData) that designates which pixels should be included in analysis. Pixels with 1 are used, while pixels with 0 are not.
4) bands: OPTIONAL - if the user wants to define the bands used in the function provide the band index (not in nm) for each wavelength in this order [681.25, 708.75, 753.75 nm].
OUTPUTS:
1) vi: the calculated spectral index value for each pixel either returned as [n_row x n_col x 1] or [n_row x 1]
02/2020 - Susan Meerdink
"""
# Determine the bands used in function
if len(bands) == 3:
if bands[0] == -1:
idx_681 = (np.abs(wave - 681.25)).argmin()
else:
idx_681 = bands[0]
if bands[1] == -1:
idx_708 = (np.abs(wave - 708.75)).argmin()
else:
idx_708 = bands[1]
if bands[2] == -1:
idx_753 = (np.abs(wave - 753.75)).argmin()
else:
idx_753 = bands[2]
print('MTCI calls for bands 681.25, 708.75, and 753.75 nm. Using bands ' + str(wave[idx_681]) +', '+ str(wave[idx_708])+', '+ str(wave[idx_753]))
else:
raise Exception('Not enough band indexes are provided by user.')
# 3D data, hyperspectral image, [n_row x n_col x n_band]
if imgData.ndim > 2:
data_753 = np.reshape(imgData[:,:,idx_753],[-1,1])
data_708 = np.reshape(imgData[:,:,idx_708],[-1,1])
data_681 = np.reshape(imgData[:,:,idx_681],[-1,1])
# 2D data, flattened hyperspectral data, [n_row x n_band]
else:
data_753 = imgData[:,idx_753]
data_708 = imgData[:,idx_708]
data_681 = imgData[:,idx_681]
# Calculate MTCI
index = (data_753 - data_708)/(data_708 - data_681)
# If data was 3D, reshape the index value back into 3D shape
if imgData.ndim > 2:
index = np.reshape(index,[imgData.shape[0],imgData.shape[1]])
if isinstance(mask, int) is False:
idx_x, idx_y = np.where(mask==0)
index[idx_x,idx_y] = 0
return index | b1f88d2041d8cf9fa645316b47db472e3626f1f8 | 9,906 |
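An illustrative run on synthetic data (not real MERIS imagery), mainly to show how the nearest-band lookup behaves:

import numpy as np

wave = np.arange(400.0, 1000.0, 5.0)      # band centres in nm
img = np.random.rand(10, 10, wave.size)   # [n_row x n_col x n_band]

mtci = mtci_vi(img, wave)                 # prints which bands were actually used
print(mtci.shape)                         # (10, 10)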
def GetSourceFile(file, sourcepath):
"""Return a relative file if it is embedded in a path."""
for root in sourcepath:
if file.find(root) == 0:
prefix_length = len(root)
if not root.endswith('/'):
prefix_length += 1
relative_file = file[prefix_length:]
return relative_file
return None | b241497131c3595f78ebf9d1481c8d9d50887e5a | 9,907 |
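For example (paths are illustrative):

print(GetSourceFile('/src/project/app/main.js', ['/src/project']))  # 'app/main.js'
print(GetSourceFile('/opt/other/main.js', ['/src/project']))        # None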
import os
import subprocess
def trace_cmd_installed():
"""Return true if trace-cmd is installed, false otherwise"""
    with open(os.devnull, 'w') as devnull:
try:
subprocess.check_call(["trace-cmd", "options"], stdout=devnull)
except OSError:
return False
return True | 7135c0b2ebfa46b69df5b692178e6b569d3014dd | 9,908 |
import configparser
import sys
def getmessage(msg_type) :
""" Renvoie le message qui doit être affiché """
cfg = configparser.ConfigParser()
cfg.read(clt_path)
    if msg_type not in cfg.options('Messages'):
        sys.stderr.write("{} is not a valid message type: {}".format(msg_type, cfg.options('Messages')))
sys.stderr.flush()
return cfg.get('Messages', msg_type) | 5e751d2f1a5df998f409f07f191f759e935a8b9d | 9,909 |
def refine_gene_list(adata, layer, gene_list, threshold, return_corrs=False):
"""Refines a list of genes by removing those that don't correlate well with the average expression of
those genes
Parameters
----------
adata: an anndata object.
layer: `str` or None (default: `None`)
The layer of data to use for calculating correlation. If None, use adata.X.
gene_list: list of gene names
threshold: threshold on correlation coefficient used to discard genes (expression of each gene is
compared to the bulk expression of the group and any gene with a correlation coefficient less
than this is discarded)
    return_corrs: whether to return the correlation coefficients of the retained genes
        instead of their names (default: False)
Returns
-------
Refined list of genes that are well correlated with the average expression trend
"""
gene_list, corrs = group_corr(adata, layer, gene_list)
if (return_corrs):
return corrs[corrs >= threshold]
else:
return gene_list[corrs >= threshold] | 0b26b5265bf62a6f771bb762cd3c497fa628c5c3 | 9,910 |
from shapely.geometry import Point
from shapely.wkt import loads
def shape_to_coords(value, precision=6, wkt=False, is_point=False):
"""
Convert a shape (a shapely object or well-known text) to x and y coordinates
suitable for use in Bokeh's `MultiPolygons` glyph.
"""
if is_point:
value = Point(*value).buffer(0.1 ** precision).envelope
x_coords = list()
y_coords = list()
if wkt:
value = loads(value)
if not hasattr(value, '__len__'):
value = [value]
for v in value:
x_dict = dict()
y_dict = dict()
if not hasattr(v, 'exterior'):
v = v.buffer(0)
x_dict['exterior'] = [round(x, precision) for x in v.exterior.coords.xy[0]]
x_dict['holes'] = [[round(y, precision) for y in x.coords.xy[0]] for x in v.interiors]
y_dict['exterior'] = [round(x, precision) for x in v.exterior.coords.xy[1]]
y_dict['holes'] = [[round(y, precision) for y in x.coords.xy[1]] for x in v.interiors]
x_coords.append(x_dict)
y_coords.append(y_dict)
return x_coords, y_coords | 1b585f6bb9831db63b2e0e8c52b6fb29ba0d9ab9 | 9,911 |
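A small usage sketch with a shapely polygon (no holes), assuming shapely is installed:

from shapely.geometry import Polygon

square = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
xs, ys = shape_to_coords(square)
print(xs[0]['exterior'])  # [0.0, 1.0, 1.0, 0.0, 0.0]
print(ys[0]['exterior'])  # [0.0, 0.0, 1.0, 1.0, 0.0]
print(xs[0]['holes'])     # [] (no interior rings)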
import argparse
def get_args():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(
description='Test Runner script')
parser.add_argument('-c', '--controller', type=str, required=True, help='Controller host name')
parser.add_argument('-s', '--server', type=str, required=True, help='Cluster Server hostname')
parser.add_argument('-e', '--export', type=str, help='NFS Export Name', default="/")
parser.add_argument('-n', '--nodes', type=int, help='Number of active nodes', default=0)
parser.add_argument('-d', '--domains', type=int, help='Number of fs domains', default=0)
parser.add_argument('-m', '--mtype', type=str, help='Mount Type', choices=['nfs3', 'nfs4', 'nfs4.1', 'smb1', 'smb2',
'smb3'], default="nfs3")
parser.add_argument('--start_vip', type=str, help="Start VIP address range")
parser.add_argument('--end_vip', type=str, help="End VIP address range")
parser.add_argument('-l', '--locking', type=str, help='Locking Type', choices=['native', 'application', 'off'],
default="native")
args = parser.parse_args()
return args | 8bb488e026c13ddc008ea2877b1d0ae9f904f970 | 9,912 |
def max_union(map_list):
"""
Element-wise maximum of the union of a list of HealSparseMaps.
Parameters
----------
map_list : `list` of `HealSparseMap`
Input list of maps to compute the maximum of
Returns
-------
result : `HealSparseMap`
Element-wise maximum of maps
"""
return _apply_operation(map_list, np.fmax, 0, union=True, int_only=False) | 169fef50486e22468f8942f6968630e8fdef6648 | 9,913 |
import numpy as np
def getDtypes(attributes, forecastHorizon):
"""
    Auxiliary function to generate a dictionary of datatypes for data queried from dynamo.
Parameters
----------
attributes : list,
Attributes queried from dynamo.
forecastHorizon : integer,
Number of forecast horizons which have been queried.
Returns
-------
attributeDtypes : dict,
Dictionary to pass to dataframe to specify dtypes of all data queried.
"""
dtypes = {
"apparentTemperature": np.float64,
"cloudCover": np.float64,
"dewPoint": np.float64,
"humidity": np.float64,
"precipIntensity": np.float64,
"precipProbability": np.float64,
"pressure": np.float64,
"temperature": np.float64,
"uvIndex": np.float64,
"visibility": np.float64,
"windBearing": np.float64,
"windGust": np.float64,
"windSpeed": np.float64,
"carbonFactor": np.float64,
"carbonIndex": str
}
attributeDtypes = dict()
attributeDtypes["unixTimestamp"] = np.int32
for attribute in attributes:
dtype = dtypes[attribute]
for x in range(forecastHorizon+1):
attributeDtypes[attribute + "_" + str(x)] = dtype
return attributeDtypes | 4974b7fe8107b36556da41173508c908785ddf5f | 9,914 |
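A usage sketch with two weather attributes and a two-step forecast horizon (attribute names come from the dtype table above):

dtypes = getDtypes(["temperature", "carbonIndex"], forecastHorizon=2)
print(sorted(dtypes))
# ['carbonIndex_0', 'carbonIndex_1', 'carbonIndex_2',
#  'temperature_0', 'temperature_1', 'temperature_2', 'unixTimestamp']
print(dtypes["temperature_0"], dtypes["carbonIndex_0"])  # <class 'numpy.float64'> <class 'str'>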
def cazy_synonym_dict():
"""Create a dictionary of accepted synonms for CAZy classes."""
cazy_dict = {
"Glycoside Hydrolases (GHs)": ["Glycoside-Hydrolases", "Glycoside-Hydrolases", "Glycoside_Hydrolases", "GlycosideHydrolases", "GLYCOSIDE-HYDROLASES", "GLYCOSIDE-HYDROLASES", "GLYCOSIDE_HYDROLASES", "GLYCOSIDEHYDROLASES", "glycoside-hydrolases", "glycoside-hydrolases", "glycoside_hydrolases", "glycosidehydrolases", "GH", "gh"],
"GlycosylTransferases (GTs)": ["Glycosyl-Transferases", "GlycosylTransferases", "Glycosyl_Transferases", "Glycosyl Transferases", "GLYCOSYL-TRANSFERASES", "GLYCOSYLTRANSFERASES", "GLYCOSYL_TRANSFERASES", "GLYCOSYL TRANSFERASES", "glycosyl-transferases", "glycosyltransferases", "glycosyl_transferases", "glycosyl transferases", "GT", "gt"],
"Polysaccharide Lyases (PLs)": ["Polysaccharide Lyases", "Polysaccharide-Lyases", "Polysaccharide_Lyases", "PolysaccharideLyases", "POLYSACCHARIDE LYASES", "POLYSACCHARIDE-LYASES", "POLYSACCHARIDE_LYASES", "POLYSACCHARIDELYASES", "polysaccharide lyases", "polysaccharide-lyases", "polysaccharide_lyases", "polysaccharidelyases", "PL", "pl"],
"Carbohydrate Esterases (CEs)": ["Carbohydrate Esterases", "Carbohydrate-Esterases", "Carbohydrate_Esterases", "CarbohydrateEsterases", "CARBOHYDRATE ESTERASES", "CARBOHYDRATE-ESTERASES", "CARBOHYDRATE_ESTERASES", "CARBOHYDRATEESTERASES", "carbohydrate esterases", "carbohydrate-esterases", "carbohydrate_esterases", "carbohydrateesterases", "CE", "ce"],
"Auxiliary Activities (AAs)": ["Auxiliary Activities", "Auxiliary-Activities", "Auxiliary_Activities", "AuxiliaryActivities", "AUXILIARY ACTIVITIES", "AUXILIARY-ACTIVITIES", "AUXILIARY_ACTIVITIES", "AUXILIARYACTIVITIES", "auxiliary activities", "auxiliary-activities", "auxiliary_activities", "auxiliaryactivities", "AA", "aa"],
"Carbohydrate-Binding Modules (CBMs)": ["Carbohydrate-Binding-Modules", "Carbohydrate_Binding_Modules", "Carbohydrate_Binding Modules", "CarbohydrateBindingModules", "CARBOHYDRATE-BINDING-MODULES", "CARBOHYDRATE_BINDING_MODULES", "CARBOHYDRATE_BINDING MODULES", "CARBOHYDRATEBINDINGMODULES", "carbohydrate-binding-modules", "carbohydrate_binding_modules", "carbohydrate_binding modules", "carbohydratebindingmodules", "CBMs", "CBM", "cbms", "cbm"]
}
return cazy_dict | 0d635075901cc3e6ba7b432c68e5be3f7d2c34d6 | 9,915 |
def new_oauth2ProviderLimited(pyramid_request):
"""this is used to build a new auth"""
validatorHooks = CustomValidator_Hooks(pyramid_request)
provider = oauth2_provider.OAuth2Provider(
pyramid_request,
validator_api_hooks=validatorHooks,
validator_class=CustomValidator,
server_class=WebApplicationServer,
)
return provider | ef15f43dfa0549431931210d788fd8ccde611634 | 9,916 |
def rand_color(red=(92, 220), green=(92, 220), blue=(92, 220)):
""" Random red, green, blue with the option to limit the ranges.
The ranges are tuples 0..255.
"""
r = rand_byte(red)
g = rand_byte(green)
b = rand_byte(blue)
return f"#{r:02x}{g:02x}{b:02x}" | 43244b5912585a4496abbd6868f97a368fd785f0 | 9,917 |
import base64
def tile_to_html(tile, fig_size=None):
""" Provide HTML string representation of Tile image."""
b64_img_html = '<img src="data:image/png;base64,{}" />'
png_bits = tile_to_png(tile, fig_size=fig_size)
b64_png = base64.b64encode(png_bits).decode('utf-8').replace('\n', '')
return b64_img_html.format(b64_png) | 9e22304c9ee44a850e17930088b0fc81b390fded | 9,918 |
from rdkit import Chem
from rdkit.Chem import rdChemReactions
def generate_buchwald_hartwig_rxns(df):
"""
Converts the entries in the excel files from Sandfort et al. to reaction SMILES.
"""
df = df.copy()
fwd_template = '[F,Cl,Br,I]-[c;H0;D3;+0:1](:[c,n:2]):[c,n:3].[NH2;D1;+0:4]-[c:5]>>[c,n:2]:[c;H0;D3;+0:1](:[c,n:3])-[NH;D2;+0:4]-[c:5]'
methylaniline = 'Cc1ccc(N)cc1'
pd_catalyst = Chem.MolToSmiles(Chem.MolFromSmiles('O=S(=O)(O[Pd]1~[NH2]C2C=CC=CC=2C2C=CC=CC1=2)C(F)(F)F'))
methylaniline_mol = Chem.MolFromSmiles(methylaniline)
rxn = rdChemReactions.ReactionFromSmarts(fwd_template)
products = []
for i, row in df.iterrows():
reacts = (Chem.MolFromSmiles(row['Aryl halide']), methylaniline_mol)
rxn_products = rxn.RunReactants(reacts)
rxn_products_smiles = set([Chem.MolToSmiles(mol[0]) for mol in rxn_products])
assert len(rxn_products_smiles) == 1
products.append(list(rxn_products_smiles)[0])
df['product'] = products
rxns = []
can_smiles_dict = {}
for i, row in df.iterrows():
aryl_halide = canonicalize_with_dict(row['Aryl halide'], can_smiles_dict)
can_smiles_dict[row['Aryl halide']] = aryl_halide
ligand = canonicalize_with_dict(row['Ligand'], can_smiles_dict)
can_smiles_dict[row['Ligand']] = ligand
base = canonicalize_with_dict(row['Base'], can_smiles_dict)
can_smiles_dict[row['Base']] = base
additive = canonicalize_with_dict(row['Additive'], can_smiles_dict)
can_smiles_dict[row['Additive']] = additive
reactants = f"{aryl_halide}.{methylaniline}.{pd_catalyst}.{ligand}.{base}.{additive}"
rxns.append(f"{reactants}>>{row['product']}")
return rxns | 80351743c2f651965735f38b514d7af017fc25ce | 9,919 |
from nipype.pipeline import engine as pe
from nipype.interfaces import utility as niu
from niworkflows.engine.workflows import LiterateWorkflow as Workflow
from niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms
from niworkflows.interfaces.itk import MultiApplyTransforms
from niworkflows.interfaces.utility import KeySelect
from niworkflows.interfaces.nibabel import GenerateSamplingReference
from niworkflows.interfaces.nilearn import Merge
from niworkflows.utils.spaces import format_reference
def init_bold_std_trans_wf(
mem_gb,
omp_nthreads,
spaces,
name="bold_std_trans_wf",
use_compression=True,
use_fieldwarp=False,
):
"""
Sample fMRI into standard space with a single-step resampling of the original BOLD series.
.. important::
This workflow provides two outputnodes.
One output node (with name ``poutputnode``) will be parameterized in a Nipype sense
(see `Nipype iterables
<https://miykael.github.io/nipype_tutorial/notebooks/basic_iteration.html>`__), and a
second node (``outputnode``) will collapse the parameterized outputs into synchronous
lists of the output fields listed below.
Workflow Graph
.. workflow::
:graph2use: colored
:simple_form: yes
from niworkflows.utils.spaces import SpatialReferences
from fprodents.workflows.bold.resampling import init_bold_std_trans_wf
wf = init_bold_std_trans_wf(
mem_gb=3,
omp_nthreads=1,
spaces=SpatialReferences(
spaces=['MNI152Lin',
('MNIPediatricAsym', {'cohort': '6'})],
checkpoint=True),
)
Parameters
----------
mem_gb : :obj:`float`
Size of BOLD file in GB
omp_nthreads : :obj:`int`
Maximum number of threads an individual process may use
spaces : :py:class:`~niworkflows.utils.spaces.SpatialReferences`
A container for storing, organizing, and parsing spatial normalizations. Composed of
:py:class:`~niworkflows.utils.spaces.Reference` objects representing spatial references.
Each ``Reference`` contains a space, which is a string of either TemplateFlow template IDs
(e.g., ``MNI152Lin``, ``MNI152NLin6Asym``, ``MNIPediatricAsym``), nonstandard references
(e.g., ``T1w`` or ``anat``, ``sbref``, ``run``, etc.), or a custom template located in
the TemplateFlow root directory. Each ``Reference`` may also contain a spec, which is a
dictionary with template specifications (e.g., a specification of ``{'resolution': 2}``
would lead to resampling on a 2mm resolution of the space).
name : :obj:`str`
Name of workflow (default: ``bold_std_trans_wf``)
use_compression : :obj:`bool`
Save registered BOLD series as ``.nii.gz``
use_fieldwarp : :obj:`bool`
Include SDC warp in single-shot transform from BOLD to MNI
Inputs
------
anat2std_xfm
List of anatomical-to-standard space transforms generated during
spatial normalization.
bold_mask
Skull-stripping mask of reference image
bold_split
Individual 3D volumes, not motion corrected
fieldwarp
a :abbr:`DFM (displacements field map)` in ITK format
hmc_xforms
List of affine transforms aligning each volume to ``ref_image`` in ITK format
bold2anat
Affine transform from ``ref_bold_brain`` to T1 space (ITK format)
name_source
BOLD series NIfTI file
Used to recover original information lost during processing
templates
List of templates that were applied as targets during
spatial normalization.
Outputs
-------
bold_std
BOLD series, resampled to template space
bold_std_ref
Reference, contrast-enhanced summary of the BOLD series, resampled to template space
bold_mask_std
BOLD series mask in template space
template
Template identifiers synchronized correspondingly to previously
described outputs.
"""
workflow = Workflow(name=name)
output_references = spaces.cached.get_spaces(nonstandard=False, dim=(3,))
std_vol_references = [
(s.fullname, s.spec) for s in spaces.references if s.standard and s.dim == 3
]
if len(output_references) == 1:
workflow.__desc__ = """\
The BOLD time-series were resampled into standard space,
generating a *preprocessed BOLD run in {tpl} space*.
""".format(
tpl=output_references[0]
)
elif len(output_references) > 1:
workflow.__desc__ = """\
The BOLD time-series were resampled into several standard spaces,
correspondingly generating the following *spatially-normalized,
preprocessed BOLD runs*: {tpl}.
""".format(
tpl=", ".join(output_references)
)
inputnode = pe.Node(
niu.IdentityInterface(
fields=[
"anat2std_xfm",
"bold_mask",
"bold_split",
"fieldwarp",
"hmc_xforms",
"bold2anat",
"name_source",
"templates",
]
),
name="inputnode",
)
iterablesource = pe.Node(
niu.IdentityInterface(fields=["std_target"]), name="iterablesource"
)
# Generate conversions for every template+spec at the input
iterablesource.iterables = [("std_target", std_vol_references)]
split_target = pe.Node(
niu.Function(
function=_split_spec,
input_names=["in_target"],
output_names=["space", "template", "spec"],
),
run_without_submitting=True,
name="split_target",
)
select_std = pe.Node(
KeySelect(fields=["anat2std_xfm"]),
name="select_std",
run_without_submitting=True,
)
select_tpl = pe.Node(
niu.Function(function=_select_template),
name="select_tpl",
run_without_submitting=True,
)
gen_ref = pe.Node(
GenerateSamplingReference(), name="gen_ref", mem_gb=0.3
) # 256x256x256 * 64 / 8 ~ 150MB)
mask_std_tfm = pe.Node(
ApplyTransforms(interpolation="MultiLabel"), name="mask_std_tfm", mem_gb=1
)
ref_std_tfm = pe.Node(
ApplyTransforms(interpolation="LanczosWindowedSinc"), name="ref_std_tfm", mem_gb=1
)
# Write corrected file in the designated output dir
mask_merge_tfms = pe.Node(
niu.Merge(2),
name="mask_merge_tfms",
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
nxforms = 3 + use_fieldwarp
merge_xforms = pe.Node(
niu.Merge(nxforms),
name="merge_xforms",
run_without_submitting=True,
mem_gb=DEFAULT_MEMORY_MIN_GB,
)
workflow.connect([(inputnode, merge_xforms, [("hmc_xforms", "in%d" % nxforms)])])
if use_fieldwarp:
workflow.connect([(inputnode, merge_xforms, [("fieldwarp", "in3")])])
bold_to_std_transform = pe.Node(
MultiApplyTransforms(
interpolation="LanczosWindowedSinc", float=True, copy_dtype=True
),
name="bold_to_std_transform",
mem_gb=mem_gb * 3 * omp_nthreads,
n_procs=omp_nthreads,
)
merge = pe.Node(Merge(compress=use_compression), name="merge", mem_gb=mem_gb * 3)
# fmt:off
workflow.connect([
(iterablesource, split_target, [('std_target', 'in_target')]),
(iterablesource, select_tpl, [('std_target', 'template')]),
(inputnode, select_std, [('anat2std_xfm', 'anat2std_xfm'),
('templates', 'keys')]),
(inputnode, mask_std_tfm, [('bold_mask', 'input_image')]),
(inputnode, ref_std_tfm, [('bold_mask', 'input_image')]),
(inputnode, gen_ref, [(('bold_split', _first), 'moving_image')]),
(inputnode, merge_xforms, [
(('bold2anat', _aslist), 'in2')]),
(inputnode, merge, [('name_source', 'header_source')]),
(inputnode, mask_merge_tfms, [(('bold2anat', _aslist), 'in2')]),
(inputnode, bold_to_std_transform, [('bold_split', 'input_image')]),
(split_target, select_std, [('space', 'key')]),
(select_std, merge_xforms, [('anat2std_xfm', 'in1')]),
(select_std, mask_merge_tfms, [('anat2std_xfm', 'in1')]),
(split_target, gen_ref, [(('spec', _is_native), 'keep_native')]),
(select_tpl, gen_ref, [('out', 'fixed_image')]),
(merge_xforms, bold_to_std_transform, [('out', 'transforms')]),
(gen_ref, bold_to_std_transform, [('out_file', 'reference_image')]),
(gen_ref, mask_std_tfm, [('out_file', 'reference_image')]),
(mask_merge_tfms, mask_std_tfm, [('out', 'transforms')]),
(gen_ref, ref_std_tfm, [('out_file', 'reference_image')]),
(mask_merge_tfms, ref_std_tfm, [('out', 'transforms')]),
(bold_to_std_transform, merge, [('out_files', 'in_files')]),
])
# fmt:on
output_names = [
"bold_mask_std",
"bold_std",
"bold_std_ref",
"spatial_reference",
"template",
]
poutputnode = pe.Node(
niu.IdentityInterface(fields=output_names), name="poutputnode"
)
# fmt:off
workflow.connect([
# Connecting outputnode
(iterablesource, poutputnode, [
(('std_target', format_reference), 'spatial_reference')]),
(merge, poutputnode, [('out_file', 'bold_std')]),
(ref_std_tfm, poutputnode, [('output_image', 'bold_std_ref')]),
(mask_std_tfm, poutputnode, [('output_image', 'bold_mask_std')]),
(select_std, poutputnode, [('key', 'template')]),
])
# fmt:on
# Connect parametric outputs to a Join outputnode
outputnode = pe.JoinNode(
niu.IdentityInterface(fields=output_names),
name="outputnode",
joinsource="iterablesource",
)
# fmt:off
workflow.connect([
(poutputnode, outputnode, [(f, f) for f in output_names]),
])
# fmt:on
return workflow | 5953ae62d40002283b41b4289fc45b96b50e319c | 9,920 |
def _get_indent(node):
"""Determine the indentation level of ``node``."""
indent = None
while node:
indent = find_first(node, TOKEN.INDENT)
if indent is not None:
indent = indent.value
break
node = node.parent
return indent | ed54eb8c1ea227534af0a3bd8eda9ab9089755d7 | 9,921 |
def distancesarr(image_centroid, object_centroids):
    """Compute the distance from the image centroid to each object centroid."""
    distances = []
    for j, _row in enumerate(object_centroids):
        distance = centroid_distance(image_centroid, object_centroids, j)
        distances.append(distance)
    return distances
def obs_agent_has_neighbour(agent_id: int, factory: Factory) -> np.ndarray:
"""Does this agent have a neighbouring node?"""
agent: Table = factory.tables[agent_id]
return np.asarray(
[
agent.node.has_neighbour(Direction.up),
agent.node.has_neighbour(Direction.right),
agent.node.has_neighbour(Direction.down),
agent.node.has_neighbour(Direction.left),
]
) | d91b4d7eabcac6ed71149ad9220c2594e5054e36 | 9,923 |
def P_split_prob(b):
"""Returns the probability of b according to the P_split() distribution.
"""
"""n = b.length
if n <= 2:
p = 1.0
else:
k = 1
        # if the tree is binary and n > 2 it must be splittable.
#while k < n and not b.splittable(k):
while not b.splittable(k):
k += 1
p = (1.0 / float(n)) * gP_split_prob(b, 0, k) * gP_split_prob(b, k, n)
return p"""
return gP_split_prob(b, b.start_index, b.start_index+b.length) | 94577a96e926686107a154aa82d55ceef6b9ab24 | 9,924 |
def t():
"""Or time(). Returns the number of seconds elapsed since the cartridge was run."""
global begin
return py_time.time() - begin | 1b43767629c9585fcd29c1293ee043a189332ed7 | 9,925 |
import sys
def data_provider(data_provider_function, verbose=True):
"""PHPUnit style data provider decorator"""
def test_decorator(test_function):
def new_test_function(self, *args):
i = 0
if verbose:
print("\nTest class : " + get_class_that_defined_method(test_function))
print("Test function: " + test_function.__name__)
for data_set in data_provider_function():
try:
if verbose:
print(" #" + str(i).rjust(2, '0') + ": ", end='')
test_function(self, *data_set)
i += 1
except AssertionError:
if verbose:
print("Failed with data set #%d: " % i, end='', file=sys.stderr)
print(data_set, file=sys.stderr)
raise
else:
if verbose:
print("passed")
if verbose:
print("----------------------------\n")
return new_test_function
return test_decorator | a291b715c18afd9af877dc8d7efa35a48a2c2365 | 9,926 |
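A usage sketch with the standard unittest module (verbose=False, so the elided `get_class_that_defined_method` helper is never called):

import unittest

def addition_cases():
    return [
        (1, 2, 3),
        (0, 0, 0),
        (-1, 1, 0),
    ]

class AdditionTest(unittest.TestCase):
    @data_provider(addition_cases, verbose=False)
    def test_addition(self, a, b, expected):
        self.assertEqual(a + b, expected)

if __name__ == '__main__':
    unittest.main()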
import argparse
def main():
"""Console script for ceda_intake."""
parser = argparse.ArgumentParser()
parser.add_argument('--test', dest='test_mode', action='store_true',
help='Create small catalog in test mode')
parser.add_argument('-p', '--project', type=str, required=True,
help='Project catalog to generate')
args = parser.parse_args()
make_intake_catalog(args.project, test_mode=args.test_mode)
return 0 | fe98c133afbdf75fce18e2d21e60d8b3a414ee0f | 9,927 |
from typing import Any
def convert_none(
key: str, attr_type: bool, attr: dict[str, Any] = {}, cdata: bool = False
) -> str:
"""Converts a null value into an XML element"""
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr["type"] = get_xml_type(None)
attrstring = make_attrstring(attr)
return f"<{key}{attrstring}></{key}>" | c04efd6ed52cb092d6987f627b7222668da32dfd | 9,928 |
def is_title(ngram, factor = 2.0):
"""
    Estimate the probability that an ngram is a title.
    `factor` scales the maximum confidence coefficient.
    """
    confidence = 1
    to_test = [n for n in ngram if n not in stop_words]
    if not to_test:
        # every token is a stop word; avoid dividing by zero below
        return confidence
    for item in to_test:
if item.istitle(): confidence += factor / len(to_test)
# print item, confidence
return confidence | 678959cdafc966d05b5ef213b0727799f20a8e0f | 9,929 |
import os
import base64
def img_to_b64(img_path):
"""显示一副图片"""
assert os.path.isfile(img_path)
with open(img_path, 'rb') as f:
img = f.read()
b64 = base64.b64encode(img)
return b64 | 6ab67dc503c7bf077fb8772b1c5708eb10efe7e7 | 9,930 |
def ul(microliters):
"""Unicode function name for creating microliter volumes"""
if isinstance(microliters,str) and ':' in microliters:
return Unit(microliters).to('microliter')
return Unit(microliters,"microliter") | 4d5d489191166a76e02cdc0211d52bec45cd65e1 | 9,931 |
import numpy as np
from sklearn.covariance import MinCovDet
def read_glh(filename):
"""
Read glitch parameters.
Parameters
----------
filename : str
Name of file to read
Returns
-------
glhParams : array
Array of median glitch parameters
glhCov : array
Covariance matrix
"""
# Extract glitch parameters
glhFit = np.genfromtxt(filename, skip_header=3)
glhParams = np.zeros(3)
glhParams[0] = np.median(glhFit[:, 8])
glhParams[1] = np.median(glhFit[:, 4])
glhParams[2] = np.median(glhFit[:, 5])
# Compute covariance matrix
tmpFit = np.zeros((len(glhFit[:, 0]), 3))
tmpFit[:, 0] = glhFit[:, 8]
tmpFit[:, 1] = glhFit[:, 4]
tmpFit[:, 2] = glhFit[:, 5]
glhCov = MinCovDet().fit(tmpFit).covariance_
# iglhCov = np.linalg.pinv(glhCov, rcond=1e-8)
return glhParams, glhCov | 6948e0f5571c6d5f7a62dad1fb136cec48e476ae | 9,932 |
def update_user_group(user_group_id, name, **options):
"""
Update a user group
:param user_group_id: The id of the user group to update
:type user_group_id: str
:param name: Name of the user group
:type name: str, optional
:param options: Generic advanced options dict, see online documentation
:type options: dict, optional
:return: The updated group
:rtype: dict
"""
uri = [USER_GROUPS_SUB_PATH, user_group_id]
params = {"name": name}
return _call_account_api("put", uri, params, **options) | 20784b935675c459b7dc258c210aedd86d7b4fb9 | 9,933 |
import numpy as np
from sklearn.cluster import KMeans
def longitudinal_kmeans(X, n_clusters=5, var_reg=1e-3,
fixed_clusters=True, random_state=None):
"""Longitudinal K-Means Algorithm (Genolini and Falissard, 2010)"""
n_time_steps, n_nodes, n_features = X.shape
# vectorize latent positions across time
X_vec = np.moveaxis(X, 0, -1).reshape(n_nodes, n_time_steps * n_features)
# perform normal k-means on the vectorized features
kmeans = KMeans(n_clusters=n_clusters,
random_state=random_state).fit(X_vec)
# this method assigns a single cluster to each point across time.
labels = kmeans.labels_.reshape(-1, 1)
labels = np.hstack([labels] * n_time_steps).T
# un-vectorize centers, shape (n_time_steps, n_centers, n_features)
centers_vec = kmeans.cluster_centers_
if fixed_clusters:
centers = np.empty((n_clusters, n_features))
for k in range(n_clusters):
muk = centers_vec[k].reshape(-1, n_time_steps).T
centers[k] = muk.mean(axis=0) # average position overtime
else:
centers = np.empty((n_time_steps, n_clusters, n_features))
for k in range(n_clusters):
centers[:, k] = centers_vec[k].reshape(-1, n_time_steps).T
# calculate cluster variances (assumed spherical and constant over-time)
variances = np.zeros(n_clusters, dtype=np.float64)
for k in range(n_clusters):
for t in range(n_time_steps):
variances[k] += np.var(X[t][labels[t] == k], axis=0).mean()
variances[k] /= n_time_steps
# clusters with a single data point will have zero-variance.
# assign a fudge factor in this case
variances[variances == 0.] = var_reg
return centers, variances, labels | a76581a7784480fa90afa9ab9e080a09ce5662f4 | 9,934 |
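A toy run on two well-separated synthetic clusters (5 time steps, 60 nodes, 2 features); requires numpy and scikit-learn.

import numpy as np

rng = np.random.RandomState(0)
X = np.concatenate([
    rng.normal(loc=-2.0, scale=0.3, size=(5, 30, 2)),
    rng.normal(loc=2.0, scale=0.3, size=(5, 30, 2)),
], axis=1)

centers, variances, labels = longitudinal_kmeans(X, n_clusters=2, random_state=0)
print(centers.shape)   # (2, 2): time-averaged cluster centres (fixed_clusters=True)
print(variances)       # per-cluster spherical variances
print(labels.shape)    # (5, 60): one label per node, repeated across time steps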
import decimal
def do_payment(
checkout_data, # Dict[str, str]
parsed_checkout, # Dict[str, str]
enable_itn, # type: bool
): # type: (...) -> Dict[str, str]
"""
Common test helper: do a payment, and assert results.
This takes a checkout's data and page parse (for session info and assertions).
This will enable and verify ITN processing if `enable_itn` is true.
Return the payment confirmation page's parse.
"""
def _post_payment(): # type: () -> requests.Response
return post_sandbox_payment(
parsed_checkout['session_type'],
parsed_checkout['session_id'],
parsed_checkout['payment_method'],
)
if enable_itn:
require_itn_configured()
with itn_handler(ITN_HOST, ITN_PORT) as itn_queue: # type: Queue
response = _post_payment()
itn_data = itn_queue.get(timeout=2)
else:
response = _post_payment()
parsed_payment = parse_payfast_page(response)
assert {
'payment_summary': parsed_checkout['payment_summary'],
'notice': 'Your payment was successful\n'
} == parsed_payment
if enable_itn:
# Check the ITN result.
# Expect whitespace-stripped versions of the checkout data.
expected = {name: value.strip(api.CHECKOUT_SIGNATURE_IGNORED_WHITESPACE)
for (name, value) in checkout_data.items()}
expected_amount_gross = '{:.2f}'.format(decimal.Decimal(checkout_data['amount'].strip()))
expected_signature = api.itn_signature(itn_data)
assert {
'm_payment_id': expected.get('m_payment_id', ''),
'pf_payment_id': itn_data.get('pf_payment_id', 'MISSING'),
'payment_status': 'COMPLETE',
'item_name': expected.get('item_name', 'MISSING'),
'item_description': expected.get('item_description', ''),
'amount_gross': expected_amount_gross,
'amount_fee': itn_data.get('amount_fee', 'MISSING'),
'amount_net': itn_data.get('amount_net', 'MISSING'),
'custom_str1': expected.get('custom_str1', ''),
'custom_str2': expected.get('custom_str2', ''),
'custom_str3': expected.get('custom_str3', ''),
'custom_str4': expected.get('custom_str4', ''),
'custom_str5': expected.get('custom_str5', ''),
'custom_int1': expected.get('custom_int1', ''),
'custom_int2': expected.get('custom_int2', ''),
'custom_int3': expected.get('custom_int3', ''),
'custom_int4': expected.get('custom_int4', ''),
'custom_int5': expected.get('custom_int5', ''),
# The sandbox seems to fix these names, rather than using the checkout submission data.
'name_first': 'Test',
'name_last': 'User 01',
'email_address': expected.get('email_address', '[email protected]'),
'merchant_id': '10000100',
'signature': expected_signature,
} == itn_data
return parsed_payment | f69383f779ce68ef28ced79d794479a4e3a4dff9 | 9,935 |
from sphinx_astropy import __version__ as sphinx_astropy_version # noqa
def ensure_sphinx_astropy_installed():
"""
Make sure that sphinx-astropy is available.
This returns the available version of sphinx-astropy as well as any
paths that should be added to sys.path for sphinx-astropy to be available.
"""
# We've split out the Sphinx part of astropy-helpers into sphinx-astropy
# but we want it to be auto-installed seamlessly for anyone using
# build_docs. We check if it's already installed, and if not, we install
# it to a local .eggs directory and add the eggs to the path (these
# have to each be added to the path, we can't add them by simply adding
# .eggs to the path)
sys_path_inserts = []
sphinx_astropy_version = None
    try:
        from sphinx_astropy import __version__ as sphinx_astropy_version  # noqa
    except ImportError:
        raise ImportError("sphinx-astropy needs to be installed to build "
                          "the documentation.")
return sphinx_astropy_version, sys_path_inserts | f20911b11beaf3483d1f2f829c63d654cb0557ef | 9,936 |
def SPEED_OF_LIGHT():
"""
The `SPEED_OF_LIGHT` function returns the speed of light in vacuum
    (unit is m s^-1) according to the IERS numerical standards (2010).
"""
return 299792458.0 | 5f0b6e6fb81018983d541a6492eb2c5aac258ff6 | 9,937 |
from typing import Optional
from typing import Tuple
from functools import partial
import abc
import math
import numpy as np
import tensorflow as tf
def odd_factory(NATIVE_TYPE): # pylint: disable=invalid-name
"""
Produces a Factory for OddTensors with underlying tf.dtype NATIVE_TYPE.
"""
assert NATIVE_TYPE in (tf.int32, tf.int64)
class Factory:
"""
Represents a native integer data type. It is currently not considered for
general use, but only to support subprotocols of SecureNN.
One value of the native dtype is removed in order to obtain an odd modulus.
More concretely, this data type wraps either tf.int32 or tf.int64 but
removes -1, which is instead mapped to 0.
"""
def tensor(self, value):
"""
Wrap `value` in this data type, performing type conversion as needed.
Internal use should consider explicit construction as an optimization that
avoids redundant correction.
"""
if isinstance(value, tf.Tensor):
if value.dtype is not NATIVE_TYPE:
value = tf.cast(value, dtype=NATIVE_TYPE)
# no assumptions are made about the tensor here and hence we need to
# apply our mapping for invalid values
value = _map_minusone_to_zero(value, NATIVE_TYPE)
return OddDenseTensor(value)
raise TypeError("Don't know how to handle {}".format(type(value)))
def constant(self, value):
raise NotImplementedError()
def variable(self, initial_value):
raise NotImplementedError()
def placeholder(self, shape):
raise NotImplementedError()
@property
def modulus(self):
if NATIVE_TYPE is tf.int32:
return 2**32 - 1
if NATIVE_TYPE is tf.int64:
return 2**64 - 1
raise NotImplementedError(("Incorrect native type ",
"{}.".format(NATIVE_TYPE)))
@property
def native_type(self):
return NATIVE_TYPE
def sample_uniform(self,
shape,
minval: Optional[int] = None,
maxval: Optional[int] = None):
"""Sample a tensor from a uniform distribution."""
assert minval is None
assert maxval is None
if secure_random.supports_seeded_randomness():
seed = secure_random.secure_seed()
return OddUniformTensor(shape=shape, seed=seed)
if secure_random.supports_secure_randomness():
sampler = secure_random.random_uniform
else:
sampler = tf.random_uniform
value = _construct_value_from_sampler(sampler=sampler, shape=shape)
return OddDenseTensor(value)
def sample_bounded(self, shape, bitlength: int):
raise NotImplementedError()
def stack(self, xs: list, axis: int = 0):
raise NotImplementedError()
def concat(self, xs: list, axis: int):
raise NotImplementedError()
master_factory = Factory()
class OddTensor(AbstractTensor):
"""
Base class for the concrete odd tensors types.
Implements basic functionality needed by SecureNN subprotocols from a few
abstract properties implemented by concrete types below.
"""
@property
def factory(self):
return master_factory
@property
@abc.abstractproperty
def value(self) -> tf.Tensor:
pass
@property
@abc.abstractproperty
def shape(self):
pass
def __repr__(self) -> str:
return '{}(shape={}, NATIVE_TYPE={})'.format(
type(self),
self.shape,
NATIVE_TYPE,
)
def __getitem__(self, slc):
return OddDenseTensor(self.value[slc])
def __add__(self, other):
return self.add(other)
def __sub__(self, other):
return self.sub(other)
def add(self, other):
"""Add other to this tensor."""
x, y = _lift(self, other)
bitlength = math.ceil(math.log2(master_factory.modulus))
with tf.name_scope('add'):
# the below avoids redundant seed expansion; can be removed once
# we have a (per-device) caching mechanism in place
x_value = x.value
y_value = y.value
z = x_value + y_value
with tf.name_scope('correct_wrap'):
# we want to compute whether we wrapped around, ie `pos(x) + pos(y) >= m - 1`,
# for correction purposes which, since `m - 1 == 1` for signed integers, can be
# rewritten as:
# -> `pos(x) >= m - 1 - pos(y)`
# -> `m - 1 - pos(y) - 1 < pos(x)`
# -> `-1 - pos(y) - 1 < pos(x)`
# -> `-2 - pos(y) < pos(x)`
wrapped_around = _lessthan_as_unsigned(-2 - y_value,
x_value,
bitlength)
z += wrapped_around
return OddDenseTensor(z)
def sub(self, other):
"""Subtract other from this tensor."""
x, y = _lift(self, other)
bitlength = math.ceil(math.log2(master_factory.modulus))
with tf.name_scope('sub'):
# the below avoids redundant seed expansion; can be removed once
# we have a (per-device) caching mechanism in place
x_value = x.value
y_value = y.value
z = x_value - y_value
with tf.name_scope('correct-wrap'):
# we want to compute whether we wrapped around, ie `pos(x) - pos(y) < 0`,
# for correction purposes which can be rewritten as
# -> `pos(x) < pos(y)`
wrapped_around = _lessthan_as_unsigned(x_value, y_value, bitlength)
z -= wrapped_around
return OddDenseTensor(z)
def bits(self, factory=None):
if factory is None:
return OddDenseTensor(binarize(self.value))
return factory.tensor(binarize(self.value))
def cast(self, factory):
if factory is master_factory:
return self
return factory.tensor(self.value)
class OddDenseTensor(OddTensor):
"""
Represents a tensor with explicit values, as opposed to OddUniformTensor
with implicit values.
Internal use only and assume that invalid values have already been mapped.
"""
def __init__(self, value):
assert isinstance(value, tf.Tensor)
self._value = value
@property
def value(self) -> tf.Tensor:
return self._value
@property
def shape(self):
return self._value.shape
class OddUniformTensor(OddTensor):
"""
Represents a tensor with uniform values defined implicitly through a seed.
Internal use only.
"""
def __init__(self, shape, seed):
self._seed = seed
self._shape = shape
@property
def shape(self):
return self._shape
@property
def value(self) -> tf.Tensor:
# TODO(Morten) result should be stored in a (per-device) cache
with tf.name_scope('expand-seed'):
sampler = partial(secure_random.seeded_random_uniform, seed=self._seed)
value = _construct_value_from_sampler(sampler=sampler,
shape=self._shape)
return value
def _lift(x, y) -> Tuple[OddTensor, OddTensor]:
"""
Attempts to lift x and y to compatible OddTensors for further processing.
"""
if isinstance(x, OddTensor) and isinstance(y, OddTensor):
assert x.factory == y.factory, "Incompatible types: {} and {}".format(
x.factory, y.factory)
return x, y
if isinstance(x, OddTensor):
if isinstance(y, int):
return x, x.factory.tensor(np.array([y]))
if isinstance(y, OddTensor):
if isinstance(x, int):
return y.factory.tensor(np.array([x])), y
raise TypeError("Don't know how to lift {} {}".format(type(x), type(y)))
def _construct_value_from_sampler(sampler, shape):
"""Sample from sampler and correct for the modified dtype."""
# to get uniform distribution over [min, max] without -1 we sample
# [min+1, max] and shift negative values down by one
unshifted_value = sampler(shape=shape,
dtype=NATIVE_TYPE,
minval=NATIVE_TYPE.min + 1,
maxval=NATIVE_TYPE.max)
value = tf.where(unshifted_value < 0,
unshifted_value + tf.ones(shape=unshifted_value.shape,
dtype=unshifted_value.dtype),
unshifted_value)
return value
def _lessthan_as_unsigned(x, y, bitlength):
"""
Performs comparison `x < y` on signed integers *as if* they were unsigned,
e.g. `1 < -1`. Taken from Section 2-12, page 23, of
[Hacker's Delight](https://www.hackersdelight.org/).
"""
with tf.name_scope('unsigned-compare'):
not_x = tf.bitwise.invert(x)
lhs = tf.bitwise.bitwise_and(not_x, y)
rhs = tf.bitwise.bitwise_and(tf.bitwise.bitwise_or(not_x, y), x - y)
z = tf.bitwise.right_shift(tf.bitwise.bitwise_or(lhs, rhs), bitlength - 1)
# turn 0/-1 into 0/1 before returning
return tf.bitwise.bitwise_and(z, tf.ones(shape=z.shape, dtype=z.dtype))
def _map_minusone_to_zero(value, native_type):
""" Maps all -1 values to zero. """
zeros = tf.zeros(shape=value.shape, dtype=native_type)
return tf.where(value == -1, zeros, value)
return master_factory | c8e55e3d5ad16568dbdadca9430d890bb4299e5e | 9,938 |
def filter_by_is_awesome(resources):
"""The resources being that is_awesome
Arguments:
resources {[type]} -- A list of resources
"""
return [resource for resource in resources if resource.is_awesome] | 46717a93e75dfed53bba03b5b7f8a5e8b8315876 | 9,939 |
import numpy as np
def topograph_image(image, step):
"""
    Takes an NxMxC numpy array and a step size and
    returns an NxMxC numpy array with the values of each channel quantised into contour levels
"""
step_gen = _step_range_gen(step)
new_img = np.array(image, copy=True)
"""step_gen ~ (255, 245, 235, 225,...) """
def myfunc(color):
for tops, bots in window(step_gen, 2):
if (color <= tops) and (color > bots):
return tops
if color > tops:
break
return 0
topograph = np.vectorize(myfunc)
return new_img if step == 1 else topograph(new_img) | c3a340c422bb16de83b132506e975fecf21a335c | 9,940 |
def _etag(cur):
"""Get current history ETag during request processing."""
h_from, h_until = web.ctx.ermrest_history_snaprange
cur.execute(("SELECT _ermrest.tstzencode( GREATEST( %(h_until)s::timestamptz, (" + _RANGE_AMENDVER_SQL + ")) );") % {
'h_from': sql_literal(h_from),
'h_until': sql_literal(h_until),
})
return cur.fetchone()[0] | bd04dca4ef140003c0df867fa258beb5c60c77dd | 9,941 |
import requests
from time import time
def send_request(url, method='GET', headers=None, param_get=None, data=None):
"""实际发送请求到目标服务器, 对于重定向, 原样返回给用户
被request_remote_site_and_parse()调用"""
final_hostname = urlsplit(url).netloc
dbgprint('FinalRequestUrl', url, 'FinalHostname', final_hostname)
# Only external in-zone domains are allowed (SSRF check layer 2)
if final_hostname not in allowed_domains_set and not developer_temporary_disable_ssrf_prevention:
raise ConnectionAbortedError('Trying to access an OUT-OF-ZONE domain(SSRF Layer 2):', final_hostname)
# set zero data to None instead of b''
if not data:
data = None
prepped_req = requests.Request(
method,
url,
headers=headers,
params=param_get,
data=data,
).prepare()
# get session
if enable_connection_keep_alive:
_session = connection_pool.get_session(final_hostname)
else:
_session = requests.Session()
# Send real requests
parse.time["req_start_time"] = time()
r = _session.send(
prepped_req,
proxies=requests_proxies,
allow_redirects=False,
stream=enable_stream_content_transfer,
verify=not developer_do_not_verify_ssl,
)
# remote request time
parse.time["req_time_header"] = time() - parse.time["req_start_time"]
dbgprint('RequestTime:', parse.time["req_time_header"], v=4)
# Some debug output
# print(r.request.headers, r.headers)
if verbose_level >= 3:
dbgprint(r.request.method, "FinalSentToRemoteRequestUrl:", r.url, "\nRem Resp Stat: ", r.status_code)
dbgprint("RemoteRequestHeaders: ", r.request.headers)
if data:
dbgprint('RemoteRequestRawData: ', r.request.body)
dbgprint("RemoteResponseHeaders: ", r.headers)
return r | b7ab08d5157964d03089c4db1a64b4f6461b3fe9 | 9,942 |
def MakeListOfPoints(charts, bot, test_name, buildername,
buildnumber, supplemental_columns):
"""Constructs a list of point dictionaries to send.
The format output by this function is the original format for sending data
to the perf dashboard.
Args:
charts: A dictionary of chart names to chart data, as generated by the
log processor classes (see process_log_utils.GraphingLogProcessor).
bot: A string which comes from perf_id, e.g. linux-release.
test_name: A test suite name, e.g. sunspider.
buildername: Builder name (for stdio links).
buildnumber: Build number (for stdio links).
supplemental_columns: A dictionary of extra data to send with a point.
Returns:
A list of dictionaries in the format accepted by the perf dashboard.
Each dictionary has the keys "master", "bot", "test", "value", "revision".
The full details of this format are described at http://goo.gl/TcJliv.
"""
results = []
# The master name used for the dashboard is the CamelCase name returned by
# GetActiveMaster(), and not the canonical master name with dots.
master = slave_utils.GetActiveMaster()
for chart_name, chart_data in sorted(charts.items()):
point_id, revision_columns = _RevisionNumberColumns(chart_data, prefix='r_')
for trace_name, trace_values in sorted(chart_data['traces'].items()):
is_important = trace_name in chart_data.get('important', [])
test_path = _TestPath(test_name, chart_name, trace_name)
result = {
'master': master,
'bot': bot,
'test': test_path,
'revision': point_id,
'supplemental_columns': {}
}
# Add the supplemental_columns values that were passed in after the
# calculated revision column values so that these can be overwritten.
result['supplemental_columns'].update(revision_columns)
result['supplemental_columns'].update(
_GetStdioUriColumn(test_name, buildername, buildnumber))
result['supplemental_columns'].update(supplemental_columns)
result['value'] = trace_values[0]
result['error'] = trace_values[1]
# Add other properties to this result dictionary if available.
if chart_data.get('units'):
result['units'] = chart_data['units']
if is_important:
result['important'] = True
results.append(result)
return results | fe903667b0e3a4c381dbcbc3205ba87b2d0ef26b | 9,943 |
import csv
from io import StringIO
def parse_csv(string):
"""
Rough port of wq/pandas.js to Python. Useful for validating CSV output
generated by Django REST Pandas.
"""
if not string.startswith(','):
data = []
for row in csv.DictReader(StringIO(string)):
for key, val in row.items():
try:
row[key] = float(val)
except ValueError:
pass
data.append(row)
return [{
'data': data
}]
reader = csv.reader(StringIO(string))
val_cols = None
val_start = None
id_cols = None
for row in reader:
if row[0] == '' and not val_cols:
val_start = row.count('')
val_cols = row[val_start:]
col_meta = [{} for v in val_cols]
elif row[-1] != '' and val_cols and not id_cols:
key = row[0]
for i, meta in enumerate(row[val_start:]):
col_meta[i].update(**{key: meta})
elif row[-1] == '' and not id_cols:
id_cols = row[:row.index('')]
meta_index = {}
meta_i = 0
datasets = []
for i, ds1 in enumerate(col_meta):
if i in meta_index:
continue
meta_index[i] = meta_i
meta_i += 1
datasets.append(ds1)
if i < len(col_meta):
for j, ds2 in enumerate(col_meta[i + 1:]):
if ds1 == ds2:
meta_index[i + j + 1] = i
for d in datasets:
d['data'] = []
elif val_cols and id_cols:
ids = {
key: val
for key, val in zip(id_cols, row[:len(id_cols)])
}
records = {}
for i, val in enumerate(row[len(id_cols):]):
mi = meta_index[i]
if mi not in records:
data = ids.copy()
else:
data = records[mi]
try:
val = float(val)
except ValueError:
pass
if val != '':
data[val_cols[i]] = val
records[mi] = data
for mi, data in records.items():
datasets[mi]['data'].append(data)
return datasets | bdf32e3ff1a2d63c568200e75d5f694ef5f49ce9 | 9,944 |
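For a plain CSV (no leading comma), the function falls through to the simple branch: one dataset, numeric fields coerced to float.

csv_text = "date,value\n2020-01-01,1.5\n2020-01-02,2.0\n"
print(parse_csv(csv_text))
# [{'data': [{'date': '2020-01-01', 'value': 1.5},
#            {'date': '2020-01-02', 'value': 2.0}]}]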
def list_system_configurations():
"""
List all the system configuration parameters
Returns:
.. code-block:: python
[
{
"ParameterName": "ParameterValue"
},
...
]
Raises:
500 - ChaliceViewError
"""
try:
print("Listing all the system configuration parameters")
system_table = ddb_resource.Table(SYSTEM_TABLE_NAME)
response = system_table.scan(
ConsistentRead=True
)
configs = response["Items"]
while "LastEvaluatedKey" in response:
response = system_table.scan(
ExclusiveStartKey=response["LastEvaluatedKey"],
ConsistentRead=True
)
configs.extend(response["Items"])
except Exception as e:
print(f"Unable to list the system configuration parameters: {str(e)}")
raise ChaliceViewError(f"Unable to list the system configuration parameters: {str(e)}")
else:
return replace_decimals(configs) | 5989cc6f1bd79e5f7bd4889883dccb7fa9bf1bd4 | 9,945 |
def add_dbnsfp_to_vds(hail_context, vds, genome_version, root="va.dbnsfp", subset=None, verbose=True):
"""Add dbNSFP fields to the VDS"""
if genome_version == "37":
dbnsfp_schema = DBNSFP_SCHEMA_37
elif genome_version == "38":
dbnsfp_schema = DBNSFP_SCHEMA_38
else:
raise ValueError("Invalid genome_version: " + str(genome_version))
expr = convert_vds_schema_string_to_annotate_variants_expr(
root=root,
other_source_fields=dbnsfp_schema,
other_source_root="vds",
)
if verbose:
print(expr)
dbnsfp_vds = read_dbnsfp_vds(hail_context, genome_version, subset=subset)
return vds.annotate_variants_vds(dbnsfp_vds, expr=expr) | f3c652c77858b9e859bd47e48002a1de3d865fa0 | 9,946 |
import bisect
def look_for_time_position(target, source, begin_pos=0):
"""
Given a time stamp, find its position in time series.
If target does NOT exist in source, then return the value of the smallest time point
that is larger than the given target.
Parameters
-------------
target : DateTime obj
A datetime obj to look for
source : list, type=DateTime
A list of DateTime objects to search from
begin_pos : int, default=0
The start position to search. Default to search from the beginning.
Returns
---------------
position : int
The location
"""
# TODO: make use of the unused parameter - begin_pos
for i, t in enumerate(source[begin_pos:]):
if t >= target:
ans = i + begin_pos
# return ans
    # bisect_left returns the exact position on a match, otherwise the first later time point
    insert_index = bisect.bisect_left(source, target, lo=begin_pos)
if insert_index >= len(source):
# print("Error (look_for_time_position): the time is out of scope.")
return -1
return insert_index
"""
# TODO: use binary search to speed up
for i, t in enumerate(source):
if t >= target:
return i
print("Error (look_for_time_position): the time is out of scope.")
return -1
""" | 352e2e2aa0081926894867e0e6b67a5452685323 | 9,947 |
from multiprocessing import cpu_count
import numpy as np
import torch
from torch.utils.data import DataLoader
def get_wav2vec_preds_for_wav(
path_to_wav: str,
model,
processor,
device: torch.device,
bs: int = 8,
loading_step: float = 10,
extra_step: float = 1,
) -> str:
"""
Gets binary predictions for wav file with a wav2vec 2.0 model
Args:
path_to_wav (str): absolute path to wav file
model: a wav2vec 2.0 model
processor: a wav2vec 2.0 processor
device: a torch.device object
bs (int, optional): Batch size. Defaults to 8.
loading_step (float, optional): length of fixed segments. Defaults to 10.
extra_step (float, optional): size of extra step to load before and after.
Defaults to 1.
Returns:
str: binary predictions
"""
def my_collate_fn(batch: list[np.array]) -> list[np.array]:
return [example for example in batch]
dataset = TokenPredDataset(path_to_wav, extra_step, loading_step)
dataloader = DataLoader(
dataset,
batch_size=bs,
shuffle=False,
collate_fn=my_collate_fn,
num_workers=min(cpu_count() // 2, 4),
drop_last=False,
)
# for the extra frames loaded before and after each segment
correction = int(extra_step / dataset.wav2vec_frame_length)
all_preds = []
i = 0
with torch.no_grad():
for wav_batch in iter(dataloader):
tokenized_audio = processor(
wav_batch, return_tensors="pt", padding="longest", sampling_rate=16000
)
input_values = tokenized_audio.input_values.to(device)
attention_mask = tokenized_audio.attention_mask.to(device)
logits = model(input_values, attention_mask=attention_mask).logits
predicted_ids = torch.argmax(logits, dim=-1)
for j, preds in enumerate(predicted_ids.tolist()):
true_length = (
attention_mask[j].cpu().numpy().sum()
/ dataset.sr
/ dataset.wav2vec_frame_length
)
# apply corrections
if i == 0:
preds = preds[:-correction]
true_length -= correction
elif i == len(dataset) - 1:
preds = preds[correction:]
true_length -= correction
else:
preds = preds[correction:-correction]
true_length -= 2 * correction
# remove padding
all_preds.extend(preds[: int(true_length)])
i += 1
tokens_preds = processor.tokenizer.convert_ids_to_tokens(all_preds)
predictions = "".join(["0" if char == "<pad>" else "1" for char in tokens_preds])
return predictions | 2f9abc97559d1853631dcdf79599190714f618c8 | 9,948 |
import jinja2
import sys
def get_template(parsed_args):
"""Initialize jinja2 and return the right template"""
env = jinja2.Environment(
loader=jinja2.PackageLoader(__name__, TEMPLATES_PATH),
trim_blocks=True, lstrip_blocks=True,
)
# Make the missingvalue() function available in the template so that the
# template fails to render if we don't provide the values it needs.
env.globals["missingvalue"] = missingvalue
env.filters["format_hex"] = format_hex
if "arty" in parsed_args.board or "vc707" in parsed_args.board or "vcu118" in parsed_args.board:
template = env.get_template("fpga.cfg")
elif "hifive" in parsed_args.board:
template = env.get_template("hifive.cfg")
else:
print("Board %s is not supported!" %
parsed_args.board, file=sys.stderr)
sys.exit(1)
return template | 2fb7c683cd660a92b542bdfdb973c91c0dafbb10 | 9,949 |
import logging
def get_node_cmd(config):
"""
Get the node CLI call for Least Cost Xmission
Parameters
----------
config : reVX.config.least_cost_xmission.LeastCostXmissionConfig
Least Cost Xmission config object.
Returns
-------
cmd : str
CLI call to submit to SLURM execution.
"""
args = ['-n {}'.format(SLURM.s(config.name)),
'local',
'-cost {}'.format(SLURM.s(config.cost_fpath)),
'-feats {}'.format(SLURM.s(config.features_fpath)),
'-cap {}'.format(SLURM.s(config.capacity_class)),
'-res {}'.format(SLURM.s(config.resolution)),
'-xcfg {}'.format(SLURM.s(config.xmission_config)),
'-gids {}'.format(SLURM.s(config.sc_point_gids)),
'-nn {}'.format(SLURM.s(config.nn_sinks)),
'-buffer {}'.format(SLURM.s(config.clipping_buffer)),
'-bmult {}'.format(SLURM.s(config.barrier_mult)),
'-mw {}'.format(SLURM.s(config.execution_control.max_workers)),
'-o {}'.format(SLURM.s(config.dirout)),
'-log {}'.format(SLURM.s(config.logdir)),
]
if config.log_level == logging.DEBUG:
args.append('-v')
cmd = ('python -m reVX.least_cost_xmission.least_cost_xmission_cli {}'
.format(' '.join(args)))
logger.debug('Submitting the following cli call:\n\t{}'.format(cmd))
return cmd | b81fb7d6c06122122a8a4ec1cc2b5bc4724948a6 | 9,950 |
def header_lines(filename):
"""Read the first five lines of a file and return them as a list of strings."""
with open(filename, mode='rb') as f:
return [f.readline().decode().rstrip() for _ in range(5)] | 35056152c1566ea2d14452308f00d6903b6e4dff | 9,951 |
import sys
from meerschaum.utils.debug import dprint
def deactivate_venv(
venv: str = 'mrsm',
color : bool = True,
debug: bool = False
) -> bool:
"""
Remove a virtual environment from sys.path (if it's been activated).
"""
global active_venvs
if venv is None:
return True
if debug:
dprint(f"Deactivating virtual environment '{venv}'...", color=color)
if venv in active_venvs:
_locks['active_venvs'].acquire()
active_venvs.remove(venv)
_locks['active_venvs'].release()
if sys.path is None:
return False
target = venv_target_path(venv, debug=debug)
_locks['sys.path'].acquire()
if str(target) in sys.path:
sys.path.remove(str(target))
_locks['sys.path'].release()
if debug:
dprint(f'sys.path: {sys.path}', color=color)
return True | 6938497201d2e8f78b64b709a1e7b9191048e1c7 | 9,952 |
async def load_last_cotd(chat_id: int):
"""Load the time when the user has last received his card of the day.
Args:
chat_id (int): user chat_id
"""
QUERY = "SELECT last_cotd FROM users WHERE id = %(id)s"
async with aconn.cursor() as cur:
await cur.execute(QUERY, {"id": chat_id})
record = await cur.fetchone()
return record[0] if record else None | 2e2aabc18a014e9f96fee91f6e8d85b875edcf2a | 9,953 |
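# A usage sketch (chat_id is illustrative), assuming `aconn` is an open async psycopg
# connection created elsewhere in the module:
#
#     import asyncio
#     last_cotd = asyncio.run(load_last_cotd(chat_id=123456789))
#     print(last_cotd)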
from pathlib import Path
import json
import torch
def load_model(targets, model_name='umxhq', device='cpu'):
"""
target model path can be either <target>.pth, or <target>-sha256.pth
(as used on torchub)
"""
model_path = Path(model_name).expanduser()
if not model_path.exists():
raise NotImplementedError
else:
# load model from disk
with open(Path(model_path, str(len(targets)) + '.json'), 'r') as stream:
results = json.load(stream)
target_model_path = Path(model_path) / "model.pth"
state = torch.load(
target_model_path,
map_location=device
)
max_bin = utils.bandwidth_to_max_bin(
44100,
results['args']['nfft'],
results['args']['bandwidth']
)
unmix = model.OpenUnmixSingle(
n_fft=results['args']['nfft'],
n_hop=results['args']['nhop'],
nb_channels=results['args']['nb_channels'],
hidden_size=results['args']['hidden_size'],
max_bin=max_bin
)
unmix.load_state_dict(state)
unmix.stft.center = True
unmix.eval()
unmix.to(device)
print('loadmodel function done')
return unmix | 8fdafa6ac28ed2277337dc1f3ded295668963c8a | 9,954 |
from typing import Callable
from typing import Optional
from typing import Tuple
from typing import List
import numpy as np
import scipy.optimize
from scipy.optimize import OptimizeResult
def model_gradient_descent(
f: Callable[..., float],
x0: np.ndarray,
*,
args=(),
rate: float = 1e-1,
sample_radius: float = 1e-1,
n_sample_points: int = 100,
n_sample_points_ratio: Optional[float] = None,
rate_decay_exponent: float = 0.0,
stability_constant: float = 0.0,
sample_radius_decay_exponent: float = 0.0,
tol: float = 1e-8,
known_values: Optional[Tuple[List[np.ndarray], List[float]]] = None,
max_iterations: Optional[int] = None,
max_evaluations: Optional[int] = None) -> scipy.optimize.OptimizeResult:
"""Model gradient descent algorithm for black-box optimization.
The idea of this algorithm is to perform gradient descent, but estimate
the gradient using a surrogate model instead of, say, by
finite-differencing. The surrogate model is a least-squared quadratic
fit to points sampled from the vicinity of the current iterate.
This algorithm works well when you have an initial guess which is in the
convex neighborhood of a local optimum and you want to converge to that
local optimum. It's meant to be used when the function is stochastic.
Args:
f: The function to minimize.
x0: An initial guess.
args: Additional arguments to pass to the function.
rate: The learning rate for the gradient descent.
sample_radius: The radius around the current iterate to sample
points from to build the quadratic model.
n_sample_points: The number of points to sample in each iteration.
n_sample_points_ratio: This specifies the number of points to sample
in each iteration as a coefficient of the number of points
required to exactly determine a quadratic model. The number
of sample points will be this coefficient times (n+1)(n+2)/2,
rounded up, where n is the number of parameters.
Setting this overrides n_sample_points.
rate_decay_exponent: Controls decay of learning rate.
In each iteration, the learning rate is changed to the
base learning rate divided by (i + 1 + S)**a, where S
is the stability constant and a is the rate decay exponent
(this parameter).
stability_constant: Affects decay of learning rate.
In each iteration, the learning rate is changed to the
base learning rate divided by (i + 1 + S)**a, where S
is the stability constant (this parameter) and a is the rate decay
exponent.
sample_radius_decay_exponent: Controls decay of sample radius.
tol: The algorithm terminates when the difference between the current
iterate and the next suggested iterate is smaller than this value.
known_values: Any prior known values of the objective function.
This is given as a tuple where the first element is a list
of points and the second element is a list of the function values
at those points.
max_iterations: The maximum number of iterations to allow before
termination.
max_evaluations: The maximum number of function evaluations to allow
before termination.
Returns:
Scipy OptimizeResult
"""
if known_values is not None:
known_xs, known_ys = known_values
known_xs = [np.copy(x) for x in known_xs]
known_ys = [np.copy(y) for y in known_ys]
else:
known_xs, known_ys = [], []
if max_iterations is None:
max_iterations = np.inf
if max_evaluations is None:
max_evaluations = np.inf
n = len(x0)
if n_sample_points_ratio is not None:
n_sample_points = int(
np.ceil(n_sample_points_ratio * (n + 1) * (n + 2) / 2))
_, f = wrap_function(f, args)
res = OptimizeResult()
current_x = np.copy(x0)
res.x_iters = [] # initializes as lists
res.xs_iters = []
res.ys_iters = []
res.func_vals = []
res.model_vals = [None]
res.fun = 0
total_evals = 0
num_iter = 0
converged = False
message = None
while num_iter < max_iterations:
current_sample_radius = (sample_radius /
(num_iter + 1)**sample_radius_decay_exponent)
# Determine points to evaluate
# in ball around current point
new_xs = [np.copy(current_x)] + [
current_x + _random_point_in_ball(n, current_sample_radius)
for _ in range(n_sample_points)
]
if total_evals + len(new_xs) > max_evaluations:
message = 'Reached maximum number of evaluations.'
break
# Evaluate points
res.xs_iters.append(new_xs)
new_ys = [f(x) for x in new_xs]
res.ys_iters.append(new_ys)
total_evals += len(new_ys)
known_xs.extend(new_xs)
known_ys.extend(new_ys)
# Save function value
res.func_vals.append(new_ys[0])
res.x_iters.append(np.copy(current_x))
res.fun = res.func_vals[-1]
# Determine points to use to build model
model_xs = []
model_ys = []
for x, y in zip(known_xs, known_ys):
if np.linalg.norm(x - current_x) < current_sample_radius:
model_xs.append(x)
model_ys.append(y)
# Build and solve model
model_gradient, model = _get_least_squares_model_gradient(
model_xs, model_ys, current_x)
# calculate the gradient and update the current point
gradient_norm = np.linalg.norm(model_gradient)
decayed_rate = (
rate / (num_iter + 1 + stability_constant)**rate_decay_exponent)
# Convergence criteria
if decayed_rate * gradient_norm < tol:
converged = True
message = 'Optimization converged successfully.'
break
# Update
current_x -= decayed_rate * model_gradient
res.model_vals.append(
model.predict([-decayed_rate * model_gradient])[0])
num_iter += 1
if converged:
final_val = res.func_vals[-1]
else:
final_val = f(current_x)
res.func_vals.append(final_val)
if message is None:
message = 'Reached maximum number of iterations.'
res.x_iters.append(current_x)
total_evals += 1
res.x = current_x
res.fun = final_val
res.nit = num_iter
res.nfev = total_evals
res.message = message
return res | d5bd32f21cdc871175c3f4c1601c1da240866e14 | 9,955 |
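# A minimal usage sketch on a noisy quadratic objective. The private helpers
# referenced above (wrap_function, _random_point_in_ball,
# _get_least_squares_model_gradient) are assumed to be defined elsewhere in the
# module; the parameter values below are illustrative only.
def _noisy_quadratic(x):
    # Optimum at (1, -2); small Gaussian noise makes the objective stochastic.
    return float(np.sum((x - np.array([1.0, -2.0])) ** 2)) + 0.01 * np.random.randn()
result = model_gradient_descent(
    _noisy_quadratic,
    np.zeros(2),
    rate=0.5,
    sample_radius=0.25,
    n_sample_points=40,
    max_iterations=200,
)
print(result.x, result.fun, result.message)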
def index():
"""Show the index."""
return render_template(
"invenio_archivematica/index.html",
module_name=_('Invenio-Archivematica')) | 9c5e62bc29466bd4eae463d1dcd71c0d880fc5f8 | 9,956 |
from os import path, walk
def get_functions_and_macros_from_doc(pmdk_path):
"""
Returns names of functions and macros in a list based on names of files
from the doc directory.
"""
path_to_functions_and_macros = path.join(pmdk_path, 'doc')
functions_and_macros_from_doc = []
for _, _, files in walk(path_to_functions_and_macros):
for f in files:
if not f.endswith('.3'):
continue
# Files with extension '.3' have the same name as functions and
# macros of PMDK library. 'pmemobj_action' is excluded, because
# it is not a name of the function.
if f.startswith('pmemobj_action'):
continue
if not 'libpmem2' in PMDK_LIBRARIES and f.startswith('pmem2'):
continue
functions_and_macros_from_doc.append(f.split('.')[0])
return functions_and_macros_from_doc | d234c7fb445574522b103ec39f8c478806588fbc | 9,957 |
def kmv_tet_polyset(m, mf, mi):
"""Create the polynomial set for a KMV space on a tetrahedron."""
poly = polynomial_set(3, 1, m)
# TODO: check this
for axes in [(x[0], x[1]), (x[0], x[2]), (x[1], x[2]), (x[1] - x[0], x[2] - x[0])]:
b = axes[0] * axes[1] * (1 - axes[0] - axes[1])
for i in range(mf - 2):
for j in range(mf - 2 - i):
poly.append(x[0] ** i * x[1] ** j * x[2] ** (mf - 3 - i - j) * b)
b = x[0] * x[1] * x[2] * (1 - x[0] - x[1] - x[2])
for i in range(mi - 3):
for j in range(mi - 3 - i):
poly.append(x[0] ** i * x[1] ** j * x[2] ** (mf - 4 - i - j) * b)
return poly | cbf87063e6ae6523acf897d6e03fd288f48e04e6 | 9,958 |
import re
def word_detokenize(tokens):
"""
A heuristic attempt to undo the Penn Treebank tokenization above. Pass the
--pristine-output flag if no attempt at detokenizing is desired.
"""
regexes = [
# Newlines
(re.compile(r'[ ]?\\n[ ]?'), r'\n'),
# Contractions
(re.compile(r"\b(can)\s(not)\b"), r'\1\2'),
(re.compile(r"\b(d)\s('ye)\b"), r'\1\2'),
(re.compile(r"\b(gim)\s(me)\b"), r'\1\2'),
(re.compile(r"\b(gon)\s(na)\b"), r'\1\2'),
(re.compile(r"\b(got)\s(ta)\b"), r'\1\2'),
(re.compile(r"\b(lem)\s(me)\b"), r'\1\2'),
(re.compile(r"\b(mor)\s('n)\b"), r'\1\2'),
(re.compile(r"\b(wan)\s(na)\b"), r'\1\2'),
# Ending quotes
(re.compile(r"([^' ]) ('ll|'re|'ve|n't)\b"), r"\1\2"),
(re.compile(r"([^' ]) ('s|'m|'d)\b"), r"\1\2"),
(re.compile(r'[ ]?”'), r'"'),
# Double dashes
(re.compile(r'[ ]?--[ ]?'), r'--'),
# Parens and brackets
(re.compile(r'([\[\(\{\<]) '), r'\1'),
(re.compile(r' ([\]\)\}\>])'), r'\1'),
(re.compile(r'([\]\)\}\>]) ([:;,.])'), r'\1\2'),
# Punctuation
(re.compile(r"([^']) ' "), r"\1' "),
(re.compile(r' ([?!\.])'), r'\1'),
(re.compile(r'([^\.])\s(\.)([\]\)}>"\']*)\s*$'), r'\1\2\3'),
(re.compile(r'([#$]) '), r'\1'),
(re.compile(r' ([;%:,])'), r'\1'),
# Starting quotes
(re.compile(r'(“)[ ]?'), r'"')
]
text = ' '.join(tokens)
for regexp, substitution in regexes:
text = regexp.sub(substitution, text)
return text.strip() | 577c2ed235aaf889699efc291d2b206a922f1f4a | 9,959 |
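# Example: undo Penn-Treebank-style tokenization on a short token list.
_tokens = ['gon', 'na', 'be', 'fine', ',', 'i', 'promise', '.']
print(word_detokenize(_tokens))  # -> "gonna be fine, i promise."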
def googlenet_paper(pretrained=False, **kwargs):
"""
GoogLeNet Model as given in the official Paper.
"""
kwargs['aux'] = True if 'aux' not in kwargs else kwargs['aux']
kwargs['replace5x5with3x3'] = False if 'replace5x5with3x3' not in kwargs \
else kwargs['replace5x5with3x3']
return get_net(GoogLeNet, pretrained=pretrained, pretrain_url=None,
fname='googlenet', kwargs_net=kwargs, attr='classifier',
inn=1024) | 01eaf2cf89648b334f634e83ca2d774e58970999 | 9,960 |
import argparse
def get_parser():
"""
Parses the command line arguments
.. todo::
Adapter services related to alerts/messaging, and local device/Edge management
:returns: An object with attributes based on the arguments
:rtype: argparse.ArgumentParser
"""
parser = argparse.ArgumentParser(description="Inmarsat Modbus Proxy Adapter for ClearBlade")
parser.add_argument('--url', default='https://platform.clearblade.com',
help="The URL of the ClearBlade Platform the adapter will connect to.")
parser.add_argument('--systemKey', required=True,
help="The System Key of the ClearBlade platform System the adapter will connect to.")
parser.add_argument('--systemSecret', required=True,
help="The System Secret of the ClearBlade plaform System the adapter will connect to.")
parser.add_argument('--deviceName', default=ADAPTER_DEVICE_ID,
help="The id/name of the device that will be used for device \
authentication against the ClearBlade platform or Edge, defined \
within the devices table of the ClearBlade platform.")
parser.add_argument('--deviceKey', required=True,
help="The active key of the device that will be used for device \
authentication against the ClearBlade platform or Edge, defined within \
the Devices table of the ClearBlade platform.")
parser.add_argument('--_slaves', dest='slaves_collection', default=DEVICE_PROXY_CONFIG_COLLECTION,
help="The ClearBlade Collection name with RTU proxy definitions")
parser.add_argument('--data', dest='data_collection', default=DATA_COLLECTION,
help="The ClearBlade Collection name with proxy data")
parser.add_argument('--net', dest='net_if', default='eth0',
help="The physical port of the network listener")
parser.add_argument('--ip', dest='ip_address', default='localhost',
help="The local IP Address the PyModbus server will listen on")
parser.add_argument('--tcp', dest='tcp_port', default=502,
help="The local TCP Port the PyModbus server will listen on")
parser.add_argument('--logLevel', dest='log_level', default='INFO',
choices=['INFO', 'DEBUG'],
help="The level of logging that will be utilized by the adapter.")
parser.add_argument('--heartbeat', dest='heartbeat', default=30,
help="The logging heartbeat interval in seconds.")
# parser.add_argument('--messagingUrl', dest='messagingURL', default='localhost',
# help="The MQTT URL of the ClearBlade Platform or Edge the adapter will connect to.")
#
# parser.add_argument('--messagingPort', dest='messagingPort', default=1883,
# help="The MQTT Port of the ClearBlade Platform or Edge the adapter will connect to.")
#
# parser.add_argument('--topicRoot', dest='adapterTopicRoot', default='modbusProxy',
# help="The root of MQTT topics this adapter will subscribe and publish to.")
#
# parser.add_argument('--deviceProvisionSvc', dest='deviceProvisionSvc', default='',
# help="The name of a service that can be invoked to provision IoT devices \
# within the ClearBlade Platform or Edge.")
#
# parser.add_argument('--deviceHealthSvc', dest='deviceHealthSvc', default='',
# help="The name of a service that can be invoked to provide the health of \
# an IoT device to the ClearBlade Platform or Edge.")
#
# parser.add_argument('--deviceLogsSvc', dest='deviceLogsSvc', default='',
# help="The name of a service that can be invoked to provide IoT device \
# logging information to the ClearBlade Platform or Edge.")
#
# parser.add_argument('--deviceStatusSvc', dest='deviceStatusSvc', default='',
# help="The name of a service that can be invoked to provide the status of \
# an IoT device to the ClearBlade Platform or Edge.")
#
# parser.add_argument('--deviceDecommissionSvc', dest='deviceDecommissionSvc', default='',
# help="The name of a service that can be invoked to decommission IoT \
# devices within the ClearBlade Platform or Edge.")
return parser | 1765c6021e4ad7175ccc54d27e9c5a71bda645e9 | 9,961 |
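# A usage sketch (key/secret values are placeholders, and module-level constants such
# as ADAPTER_DEVICE_ID are assumed to be defined as in the module above):
#
#     parser = get_parser()
#     args = parser.parse_args([
#         "--systemKey", "SYSTEM_KEY",
#         "--systemSecret", "SYSTEM_SECRET",
#         "--deviceKey", "DEVICE_KEY",
#     ])
#     print(args.ip_address, args.tcp_port, args.log_level)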
import os
def get_prefix(path):
"""Generate a prefix for qresource in function of the passed path.
Args:
path (str): Relative path of a folder of resources from project dir.
Returns;
str: Prefix corresponding to `path`
"""
# Remove finishing separator from path if exist
if path[-1] == os.sep:
path = path[:-1]
# Return the prefix corresponding to the path
return "/" + os.path.basename(path) | e412826cbc0ae631e5084a6619b73912cdd0ce40 | 9,962 |
def is_regex(obj):
"""Cannot do type check against SRE_Pattern, so we use duck typing."""
return hasattr(obj, 'match') and hasattr(obj, 'pattern') | cfd4fc702fb121735f49d4ba61395ce8f6508b1a | 9,963 |
import functools
def GetDefaultScopeLister(compute_client, project=None):
"""Constructs default zone/region lister."""
scope_func = {
compute_scope.ScopeEnum.ZONE:
functools.partial(zones_service.List, compute_client),
compute_scope.ScopeEnum.REGION:
functools.partial(regions_service.List, compute_client),
compute_scope.ScopeEnum.GLOBAL: lambda _: [ResourceStub(name='')]
}
def Lister(scopes, _):
prj = project or properties.VALUES.core.project.Get(required=True)
results = {}
for scope in scopes:
results[scope] = scope_func[scope](prj)
return results
return Lister | 25069007b68a74b26e2767e146c25466b65e3377 | 9,964 |
def find_user(username):
"""
Function that will find a user by their username and return the user
"""
return User.find_by_username(username) | ef036f0df72bbdcb9aa8db519120209e20678e83 | 9,965 |
from typing import List
def filter_by_author(resources: List[Resource], author: Author) -> List[Resource]:
"""The resources by the specified author
Arguments:
resources {List[Resource]} -- A list of resources
"""
return [resource for resource in resources if resource.author == author] | d03673ed8c45f09996e29eb996fc31fa3d073315 | 9,966 |
import time
def cpu_bound_op(exec_time, *data):
"""
Simulation of a long-running CPU-bound operation
:param exec_time: how long this operation will take
:param data: data to "process" (sum it up)
:return: the processed result
"""
logger.info("Running cpu-bound op on {} for {} seconds".format(data, exec_time))
time.sleep(exec_time)
return sum(data) | a52d3e25a75f9c7b0ab680a9ad1cb0e5d40de92a | 9,967 |
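# Usage sketch: because cpu_bound_op blocks, async callers typically dispatch it to a
# process pool instead of running it on the event loop directly (`logger` is assumed
# to be configured elsewhere in the module).
import asyncio
from concurrent.futures import ProcessPoolExecutor
async def _run_cpu_bound():
    loop = asyncio.get_running_loop()
    with ProcessPoolExecutor() as pool:
        # Sleeps ~1 second in a worker process, then returns 2 + 3 + 4 = 9.
        return await loop.run_in_executor(pool, cpu_bound_op, 1, 2, 3, 4)
# print(asyncio.run(_run_cpu_bound()))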
def elastic_transform_approx(
img,
alpha,
sigma,
alpha_affine,
interpolation=cv2.INTER_LINEAR,
border_mode=cv2.BORDER_REFLECT_101,
value=None,
random_state=None,
):
"""Elastic deformation of images as described in [Simard2003]_ (with modifications for speed).
Based on https://gist.github.com/erniejunior/601cdf56d2b424757de5
.. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
"""
if random_state is None:
random_state = np.random.RandomState(1234)
height, width = img.shape[:2]
# Random affine
center_square = np.float32((height, width)) // 2
square_size = min((height, width)) // 3
alpha = float(alpha)
sigma = float(sigma)
alpha_affine = float(alpha_affine)
pts1 = np.float32(
[
center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size,
]
)
pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)
matrix = cv2.getAffineTransform(pts1, pts2)
warp_fn = _maybe_process_in_chunks(
cv2.warpAffine, M=matrix, dsize=(width, height), flags=interpolation, borderMode=border_mode, borderValue=value
)
img = warp_fn(img)
dx = random_state.rand(height, width).astype(np.float32) * 2 - 1
cv2.GaussianBlur(dx, (17, 17), sigma, dst=dx)
dx *= alpha
dy = random_state.rand(height, width).astype(np.float32) * 2 - 1
cv2.GaussianBlur(dy, (17, 17), sigma, dst=dy)
dy *= alpha
x, y = np.meshgrid(np.arange(width), np.arange(height))
map_x = np.float32(x + dx)
map_y = np.float32(y + dy)
remap_fn = _maybe_process_in_chunks(
cv2.remap, map1=map_x, map2=map_y, interpolation=interpolation, borderMode=border_mode, borderValue=value
)
return remap_fn(img) | 9684847e756e0299be6766b5bb8220e6a1b4fc8d | 9,968 |
def uncompleted_task(request):
""" Make the completed task incomplete if use uncheck the task."""
task_list = TaskList.objects.all()
context = {'task_list': task_list}
if request.POST:
task_is_unchecked = request.POST['task_is_unchecked']
task_unchecked_id = request.POST['task_unchecked_id']
get_task = TaskList.objects.get(pk=task_unchecked_id)
if task_is_unchecked:
get_task.is_completed = False
get_task.save()
if request.is_ajax():
return JsonResponse({'task_is_unchecked': task_is_unchecked,
'task_unchecked_id': task_unchecked_id,
'unchecked_view_text': "From View UnChecked:"}, status=200)
return render(request, 'todos/home.html', context) | 1a12d3fc8cc7dc60b8b26d4c25041ed12abc0cd1 | 9,969 |
def jitter(t, X, amountS):
"""Return a random number (intended as a time offset, i.e. jitter) within the range +/-amountS
The jitter is different (but constant) for any given day in t (epoch secs)
and for any value X (which might be e.g. deviceID)"""
dt = ISO8601.epoch_seconds_to_datetime(t)
dayOfYear = int(dt.strftime("%j"))
year = int(dt.strftime("%Y"))
uniqueValue = year*367+dayOfYear+abs(hash(X)) # Note that hash is implementation-dependent so may give different results on different platforms
rand = utils.hashIt(uniqueValue,100)
sign = int(str(uniqueValue)[0]) < 5
v = (rand / 100.0) * amountS
if sign:
v = -v
return v | db62c4365bf4cbf9d2ed0587c51846a274a691a4 | 9,970 |
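# A usage sketch: a stable per-day, per-device offset of up to +/- 10 minutes
# (ISO8601 and utils are package-local helpers assumed to be importable here):
#
#     offset_s = jitter(t=1600000000, X="device-42", amountS=600)
#     send_time = 1600000000 + offset_s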
import string
def check_DNA(DNA_sequence):
"""Check that we have a DNA sequence without junk"""
#
# Remove all spaces
DNA_sequence=string.replace(DNA_sequence,' ','')
# Upper case
DNA_sequence=string.upper(DNA_sequence)
# Check that we only have DNA bases in the seq
ok=1
garbage={}
DNA_bases=['A','G','C','T']
for letter in DNA_sequence:
if not letter in DNA_bases:
ok=None
garbage[letter]=1
if ok:
return ok, DNA_sequence
return ok,garbage.keys() | d73b8176938716b5c3710055750f05b24eab80a5 | 9,971 |
def read():
"""
read() : Fetches documents from Firestore collection as JSON
warehouse : Return document that matches query ID
all_warehouses : Return all documents
"""
try:
warehouse_id = request.args.get('id')
if warehouse_id:
warehouse = warehouse_ref.document(warehouse_id).get()
return jsonify(warehouse.to_dict()), 200
else:
all_warehouses = [doc.to_dict() for doc in warehouse_ref.stream()]
return jsonify(all_warehouses), 200
except Exception as e:
return f"An Error Occured: {e}" | 17d3622f9f0770edb333907298112487432c7025 | 9,972 |
def transposeC(array, axes=None):
"""
Returns the (conjugate) transpose of the input `array`.
Parameters
----------
array : array_like
Input array that needs to be transposed.
Optional
--------
axes : 1D array_like of int or None. Default: None
If *None*, reverse the dimensions.
Else, permute the axes according to the values given.
Returns
-------
array_t : :obj:`~numpy.ndarray` object
Input `array` with its axes transposed.
Examples
--------
Using an array with only real values returns its transposed variant:
>>> array = np.array([[1, 2.5], [3.5, 5]])
>>> array
array([[ 1. , 2.5],
[ 3.5, 5. ]])
>>> transposeC(array)
array([[ 1. , 3.5],
[ 2.5, 5. ]])
And using an array containing complex values returns its conjugate
transposed:
>>> array = np.array([[1, -2+4j], [7.5j, 0]])
>>> array
array([[ 1.+0.j , -2.+4.j ],
[ 0.+7.5j, 0.+0.j ]])
>>> transposeC(array)
array([[ 1.-0.j , 0.-7.5j],
[-2.-4.j , 0.-0.j ]])
"""
# Take the transpose of the conjugate or the input array and return it
return(np.transpose(np.conjugate(array), axes)) | fecd60d72c4c38dc87d59f430365cefe72f40ef4 | 9,973 |
from typing import List
def _partition_files(files: List[str], num_partitions: int) -> List[List[str]]:
"""Split files into num_partitions partitions of close to equal size"""
id_to_file = defaultdict(list)
for f in files:
id_to_file[_sample_id_from_path(f)[0]].append(f)
sample_ids = np.array(list(id_to_file))
np.random.shuffle(sample_ids)
split_ids = np.array_split(sample_ids, num_partitions)
splits = [
sum((id_to_file[sample_id] for sample_id in split), []) for split in split_ids
]
return [split for split in splits if split] | e9fac329f8e1c1c7682984216c34e7b259776c82 | 9,974 |
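# A usage sketch, assuming _sample_id_from_path() returns a tuple whose first element
# is the sample id parsed from the file name:
#
#     splits = _partition_files(
#         ["a_0.npy", "a_1.npy", "b_0.npy", "c_0.npy"], num_partitions=2
#     )
#     # All files belonging to one sample id land in the same partition.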
import json
import time
from pathlib import Path
from tempfile import gettempdir
from uuid import uuid4
import requests
def run_vscode_command(
command: str,
*args: str,
wait_for_finish: bool = False,
expect_response: bool = False,
decode_json_arguments: bool = False,
):
"""Execute command via vscode command server."""
# NB: This is a hack to work around the fact that talon doesn't support
# variable argument lists
args = list(
filter(
lambda x: x is not NotSet,
args,
)
)
if decode_json_arguments:
args = [json.loads(arg) for arg in args]
port_file_path = Path(gettempdir()) / "vscode-port"
original_contents = port_file_path.read_text()
# Issue command to VSCode telling it to update the port file. Because only
# the active VSCode instance will accept keypresses, we can be sure that
# the active VSCode instance will be the one to write the port.
if is_mac:
actions.key("cmd-shift-alt-p")
else:
actions.key("ctrl-shift-alt-p")
# Wait for the VSCode instance to update the port file. This generally
# happens within the first millisecond, but we give it 3 seconds just in
# case.
start_time = time.monotonic()
new_contents = port_file_path.read_text()
sleep_time = 0.0005
while True:
if new_contents != original_contents:
try:
decoded_contents = json.loads(new_contents)
# If we're successful, we break out of the loop
break
except ValueError:
# If we're not successful, we keep waiting; we assume it was a
# partial write from VSCode
pass
time.sleep(sleep_time)
sleep_time *= 2
if time.monotonic() - start_time > 3.0:
raise Exception("Timed out waiting for VSCode to update port file")
new_contents = port_file_path.read_text()
port = decoded_contents["port"]
response = requests.post(
f"http://localhost:{port}/execute-command",
json={
"commandId": command,
"args": args,
"waitForFinish": wait_for_finish,
"expectResponse": expect_response,
},
timeout=(0.05, 3.05),
)
response.raise_for_status()
actions.sleep("25ms")
if expect_response:
return response.json() | 4fa3626f1371c0c03923f37136616fb7055ef9cf | 9,975 |
import threading
def run_with_timeout(proc, timeout, input=None):
"""
Run Popen process with given timeout. Kills the process if it does
not finish in time.
You need to set stdout and/or stderr to subprocess.PIPE in Popen, otherwise
the output will be None.
The returncode is 999 if the process was killed.
:returns: (returncode, stdout string, stderr string)
"""
output = []
def target():
output.extend(proc.communicate(input))
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
killed = False
thread.join(timeout)
if thread.is_alive():
proc.terminate()
killed = True
thread.join()
returncode = proc.returncode
if killed:
returncode = 999
return returncode, output[0], output[1] | 414e18dae8f31b20c472f7da14475f8da5761781 | 9,976 |
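# Example (POSIX): kill a command that exceeds a 5-second timeout.
import subprocess
_proc = subprocess.Popen(["sleep", "10"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_rc, _out, _err = run_with_timeout(_proc, timeout=5)
print(_rc)  # 999, because the process was terminated when the timeout expired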
def dot(x, y, alpha=0):
"""
Compute alpha = xy + alpha, storing the incremental sum in alpha
x and y can be row and/or column vectors. If necessary, an
implicit transposition happens.
"""
assert type(x) is matrix and len(x.shape) is 2, \
"laff.dot: vector x must be a 2D numpy.matrix"
assert type(y) is matrix and len(y.shape) is 2, \
"laff.dot: vector y must be a 2D numpy.matrix"
if(type(alpha) is matrix): m_alpha, n_alpha = alpha.shape
assert isinstance(alpha,(int,float,complex)) or (m_alpha is 1 and n_alpha is 1), \
"laff.scal: alpha must be a 1 x 1 matrix"
if(type(alpha) is matrix): alpha[0,0] = 0
else: alpha = 0
m_x, n_x = x.shape
m_y, n_y = y.shape
assert m_x is 1 or n_x is 1, "laff.dot: x is not a vector"
assert m_y is 1 or n_y is 1, "laff.dot: y is not a vector"
if m_x is 1 and m_y is 1: # x is a row, y is a row
assert n_x == n_y, "laff.dot: size mismatch between x and y"
if(type(alpha) is matrix):
for i in range(n_x): alpha[0,0] += y[0, i] * x[0, i]
else:
for i in range(n_x): alpha += y[0, i] * x[0, i]
elif n_x is 1 and n_y is 1: # x is a column, y is a column
assert m_x == m_y, "laff.dot: size mismatch between x and y"
if(type(alpha) is matrix):
for i in range(m_x): alpha[0,0] += y[i, 0] * x[i, 0]
else:
for i in range(m_x): alpha += y[i, 0] * x[i, 0]
elif m_x is 1 and n_y is 1: # x is a row, y is a column
assert n_x == m_y, "laff.dot: size mismatch between x and y"
if(type(alpha) is matrix):
for i in range(n_x): alpha[0,0] += y[i, 0] * x[0, i]
else:
for i in range(n_x): alpha += y[i, 0] * x[0, i]
elif n_x is 1 and m_y is 1: # x is a column, y is a row
assert m_x == n_y, "laff.dot: size mismatch between x and y"
        if(type(alpha) is matrix):
            for i in range(m_x): alpha[0,0] += y[0, i] * x[i, 0]
        else:
            for i in range(m_x): alpha += y[0, i] * x[i, 0]
return alpha | 2ef9fd4b02a586e9caff70b75bd598e925608171 | 9,977 |
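# Example: dot product of a 1x3 row vector and a 3x1 column vector, assuming the
# enclosing module imports `matrix` from numpy as the function above requires.
from numpy import matrix
_x = matrix('1. 2. 3.')    # row vector
_y = matrix('4.; 5.; 6.')  # column vector
print(dot(_x, _y))         # 1*4 + 2*5 + 3*6 = 32.0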
def load_train_test_data(
train_data_path, label_binarizer, test_data_path=None,
test_size=None, data_format="list"):
"""
train_data_path: path. path to JSONL data that contains text and tags fields
label_binarizer: MultiLabelBinarizer. multilabel binarizer instance used to transform tags
test_data_path: path, default None. path to test JSONL data similar to train_data
test_size: float, default None. if test_data_path not provided, dictates portion to be used as test
data_format: str, default list. controls data are returned as lists or generators for memory efficiency
"""
if data_format == "list":
if test_data_path:
X_train, Y_train, _ = load_data(train_data_path, label_binarizer)
X_test, Y_test, _ = load_data(test_data_path, label_binarizer)
else:
X, Y, _ = load_data(train_data_path, label_binarizer)
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, random_state=42, test_size=test_size
)
else:
if test_data_path:
X_train = partial(yield_texts, train_data_path)
Y_train = partial(yield_tags, train_data_path, label_binarizer)
X_test = partial(yield_texts, test_data_path)
Y_test = partial(yield_tags, test_data_path, label_binarizer)
else:
# need to split train / test and shuffle in memory efficient way
raise NotImplementedError
return X_train, X_test, Y_train, Y_test | 51aaf916f948b198e1f25c002655731008c173ed | 9,978 |
import os
def delete_courrier_affaire_view(request):
"""
Supprimer le fichier une fois téléchargé
"""
settings = request.registry.settings
filename = request.params['filename']
temporary_directory = settings["temporary_directory"]
file_path = os.path.join(temporary_directory, filename)
if os.path.exists(file_path):
os.remove(file_path)
return "ok"
else:
raise exc.HTTPNotFound("Le fichier est indisponible") | 96b89ae5ed378b38af5a4426c6b1e9004d215bc9 | 9,979 |
def get_token() -> str:
"""Obtains the Access Token from the Authorization Header"""
# Get the authorization header
authorization_header = request.headers.get("Authorization", None)
# Raise an error if no Authorization error is found
if not authorization_header:
payload = {
"code": "authorization_header_missing",
"description": "Authorization header is expected",
}
raise AuthError(payload, 401)
authorization_header_parts = authorization_header.split()
# We are expecting the Authorization header to contain a Bearer token
if authorization_header_parts[0].lower() != "bearer":
payload = {
"code": "invalid_header",
"description": "Authorization header must be a Bearer token",
}
raise AuthError(payload, 401)
# The Authorization header is prefixed with Bearer, but does not contain the actual token
elif len(authorization_header_parts) == 1:
payload = {"code": "invalid_header", "description": "Token not found"}
raise AuthError(payload, 401)
# We only expect 2 parts, "Bearer" and the access token
elif len(authorization_header_parts) > 2:
payload = {
"code": "invalid_header",
"description": "Authorization header must be a valid Bearer token",
}
raise AuthError(payload, 401)
# If all checks out, we return the access token
return authorization_header_parts[1] | 5e1d05f705ad1c7505963e96c8637e5ab42aff79 | 9,980 |
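# A usage sketch inside a Flask view (the route name is illustrative; an AuthError
# handler is assumed to be registered elsewhere in the app):
#
#     @app.route("/api/private")
#     def private():
#         access_token = get_token()
#         # ...validate access_token, then serve the request...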
def load_image(path, grayscale=False):
"""Summary
Args:
path (str): Path to image
grayscale (bool): True loads image as grayscale, False loads image as color
Returns:
numpy.ndarray: Image loaded from path
"""
    # Load the image as grayscale or color depending on the flag
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE) if grayscale else cv2.imread(path)
return img | dd4ec76a2cd057f384b1894bc675c54abcff04a2 | 9,981 |
def convert_op_str(qubit_op_str, op_coeff):
"""
Convert qubit operator into openfermion format
"""
converted_Op=[f'{qOp_str}{qNo_index}' for qNo_index, qOp_str in enumerate(qubit_op_str) if qOp_str !='I']
seperator = ' ' #space
Openfermion_qubit_op = QubitOperator(seperator.join(converted_Op), op_coeff)
return Openfermion_qubit_op | a6a512758a706b3a788f686331747ac9224c2f8b | 9,982 |
def __state_resolving_additional_facts(conversation, message, just_acknowledged):
"""
Bot is asking the user questions to resolve additional facts
:param conversation: The current conversation
:param message: The user's message
:param just_acknowledged: Whether or not an acknowledgement just happened.
    Used to skip fact resolution and instead ask a question immediately.
    :return: A question to ask
"""
question = None
# Retrieve current_fact from conversation
current_fact = conversation.current_fact
if just_acknowledged:
question = Responses.fact_question(current_fact.name)
else:
# Extract entity from message based on current fact
fact_entity_value = __extract_entity(current_fact.name, current_fact.type, message)
if fact_entity_value is not None:
next_fact = fact_service.submit_resolved_fact(conversation, current_fact, fact_entity_value)
new_fact_id = next_fact['fact_id']
new_fact = None
if new_fact_id:
new_fact = db.session.query(Fact).get(new_fact_id)
conversation.current_fact = new_fact
# Additional facts remain to be asked
if fact_service.has_additional_facts(conversation):
# Additional fact limit reached, time for a new prediction
if fact_service.count_additional_facts_resolved(conversation) % MAX_ADDITIONAL_FACTS == 0:
conversation.bot_state = BotState.GIVING_PREDICTION
else:
question = Responses.fact_question(new_fact.name)
else:
# There are no more additional facts! Give a prediction
conversation.bot_state = BotState.GIVING_PREDICTION
return question | d1e75e0d67aa2b1bcc899885c83132a47df015dc | 9,983 |
import json
def load_dataset(path):
"""Load json file and store fields separately."""
with open(path) as f:
data = json.load(f)['data']
output = {'qids': [], 'questions': [], 'answers': [],
'contexts': [], 'qid2cid': []}
for article in data:
for paragraph in article['paragraphs']:
output['contexts'].append(paragraph['context'])
for qa in paragraph['qas']:
output['qids'].append(qa['id'])
output['questions'].append(qa['question'])
output['qid2cid'].append(len(output['contexts']) - 1)
if 'answers' in qa:
output['answers'].append(qa['answers'])
return output | 4ba01f49d6a0aa3329b076fc0de9dd38fb99f2f8 | 9,984 |
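# A usage sketch on a SQuAD-format file (the file name is illustrative):
#
#     data = load_dataset("train-v1.1.json")
#     print(len(data["questions"]), "questions over", len(data["contexts"]), "contexts")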
def generate_rand_enex_by_prob_nb(shape: tp.Shape,
entry_prob: tp.MaybeArray[float],
exit_prob: tp.MaybeArray[float],
entry_wait: int,
exit_wait: int,
entry_pick_first: bool,
exit_pick_first: bool,
flex_2d: bool,
seed: tp.Optional[int] = None) -> tp.Tuple[tp.Array2d, tp.Array2d]:
"""Pick entries by probability `entry_prob` and exits by probability `exit_prob` one after another.
`entry_prob` and `exit_prob` should be 2-dim arrays of shape `shape`.
Specify `seed` to make output deterministic."""
if seed is not None:
np.random.seed(seed)
temp_idx_arr = np.empty((shape[0],), dtype=np.int_)
return generate_enex_nb(
shape,
entry_wait,
exit_wait,
entry_pick_first,
exit_pick_first,
rand_by_prob_choice_nb, (entry_prob, entry_pick_first, temp_idx_arr, flex_2d),
rand_by_prob_choice_nb, (exit_prob, exit_pick_first, temp_idx_arr, flex_2d)
) | fbb7fc4bcf50139f455049edd4af62e9c0429dd3 | 9,985 |
def bresenham_3d_line_of_sight(observers, targets, raster, obs_height_field,
tar_height_field, radius, raster_crs, fresnel=False):
"""Naive bresenham line of sight algorithm"""
writePoints = []
lines_for_shp = []
start, end = 0, 0
raster_transform = raster.GetGeoTransform()
pixelWidth = raster_transform[1]
pix = pixelWidth
pixelHeight = raster_transform[5]
xOrigin = raster_transform[0]
yOrigin = raster_transform[3]
raster_band = raster.GetRasterBand(1)
info = []
for obs in range(observers.GetFeatureCount()):
observer = observers.GetFeature(obs)
# get Observer point geometry
obs_geom = observer.geometry()
try:
obs_x = obs_geom.GetPoints()[0][0]
obs_y = obs_geom.GetPoints()[0][1]
except ValueError:
debugHere()
# offset x,y values to equivalent raster index values
obs_x_off = int((obs_x - xOrigin) / pixelWidth)
obs_y_off = int((obs_y - yOrigin) / pixelHeight)
mask_x = obs_x - radius
mask_y = obs_y - radius
mask_x_pix = int((mask_x - xOrigin) / pixelWidth)
mask_y_pix = int((mask_y - yOrigin) / pixelHeight)
radius_pix = int(radius / pixelWidth)
mask_width = radius_pix * 2
mask_height = radius_pix * 2
if mask_x_pix < 0: # mask has overflow beyond raster edge
mask_width += mask_x_pix # clip mask width by the overflow
mask_x_pix = 0 # set mask origin x to edge of raster
mask_x = xOrigin
if mask_y_pix < 0:
mask_height += mask_y_pix
mask_y_pix = 0
mask_y = yOrigin
# truncate positive overflow
if mask_width + mask_x_pix > raster_band.XSize:
overflow = raster_band.XSize - (mask_width + mask_x_pix)
mask_width += overflow
if mask_height + mask_y_pix > raster_band.YSize:
overflow = raster_band.YSize - (mask_height + mask_y_pix)
mask_height += overflow
mask_x_pix = int(mask_x_pix)
mask_y_pix = int(mask_y_pix)
mask_width = int(mask_width)
mask_height = int(mask_height)
new_obs_x = obs_x_off - mask_x_pix
new_obs_y = mask_y_pix - obs_y_off
# x_geog, y_geog = raster_x_min + x * pix + pix / 2, raster_y_max - y * pix - pix / 2
# areaOfInterest = QgsRectangle(x_geog - radius, y_geog - radius, x_geog + radius, y_geog + radius)
# set observer height
# Raster used is smaller than radius, so no clipping nescesarry
try:
if raster_band.YSize < radius * 2 or raster_band.YSize < radius * 2:
mask_x = xOrigin
mask_y = yOrigin
new_obs_x = obs_x_off
new_obs_y = obs_y_off
raster_array = raster_band.ReadAsArray().astype(np.float)
else:
raster_array = raster_band.ReadAsArray(mask_x_pix, mask_y_pix, mask_width, mask_height).astype(np.float)
except:
debugHere()
try:
obs_height = observer.items()[obs_height_field]
if obs_height is None:
obs_height = 1.6 # set observer height to person height
z = obs_height + raster_array[new_obs_y, new_obs_x]
except(IndexError, TypeError) as e:
print e
debugHere()
start = (new_obs_y, new_obs_x, z)
writePoints.append([(mask_x, mask_y),
(mask_x, mask_y + (mask_height * pixelHeight)),
(mask_x + (mask_width * pixelWidth) , mask_y),
(mask_x + (mask_width * pixelWidth), mask_y + (mask_height * pixelHeight))])
# raster_crs
for tar in range(targets.GetFeatureCount()):
target_in_radius = True
target = targets.GetFeature(tar)
# get Target point geometry
tar_geom = target.geometry()
x, y = tar_geom.GetPoints()[0]
target_outside_radius = euclidian_distance((obs_x, obs_y), (x, y)) > radius
if target_outside_radius:
continue
# offset x,y values to equivalent raster index values
x = int((x - mask_x) / pixelWidth)
y = int((y - mask_y) / pixelHeight)
# check if target point is out of search area
# if target_outside_radius:
# continue
# get target height
z = target.items()[tar_height_field]
try:
landscape_height = raster_array[y, x]
except IndexError:
target_in_radius = False
continue
# get target height
z = target.items()[tar_height_field] + landscape_height
end = (y, x, z)
# Unpack start/end tuples
x, y, z = start
x2, y2, z2 = end
z_value = z
# Calculate differentials
diff_x = x2 - x
diff_y = y2 - y
diff_z = z2 - z
# Assign incremental slope values for x, y, z
incr_x = -1 if (diff_x < 0) else 1
incr_y = -1 if (diff_y < 0) else 1
incr_z = -1 if (diff_z < 0) else 1
abs_diff_x = abs(diff_x)
abs_diff_y = abs(diff_y)
abs_diff_z = abs(diff_z)
diff_x2 = abs_diff_x * 2
diff_y2 = abs_diff_y * 2
diff_z2 = abs_diff_z * 2
# Find the steepest axis and find line segments accordingly
if (abs_diff_x >= abs_diff_y) and (abs_diff_x >= abs_diff_z):
steepest = 'x'
z_line_length = np.sqrt(pow(diff_x, 2) + pow(diff_z, 2))
z_segment_length = z_line_length / diff_x
elif (abs_diff_y > abs_diff_x) and (abs_diff_y >= abs_diff_z):
steepest = 'y'
z_line_length = np.sqrt(pow(diff_y, 2) + pow(diff_z, 2))
z_segment_length = z_line_length / diff_y
elif (abs_diff_z > abs_diff_x) and (abs_diff_z > abs_diff_y):
steepest = 'z'
z_line_length = np.sqrt(pow(diff_x, 2) + pow(diff_z, 2))
z_segment_length = z_line_length / diff_z
else:
return "Error when finding steepest line"
incr_z_value = np.sqrt(abs(pow(z_segment_length, 2) - pow(1, 2)))
incr_z_value = -incr_z_value if (diff_z < 0) else incr_z_value
xm, ym, zm = (x2 + x) / 2, (y2 + y) / 2, (z2 + z) / 2
zm = z + xm * incr_z_value
mid_fresnel = get_fresnel_radius(z_line_length / 2, z_line_length / 2)
if fresnel:
try:
visibility = zm - mid_fresnel > raster_array[xm, ym]
except:
debugHere()
if not visibility:
lines_for_shp.append(build_return_package(observer, target, visibility))
continue
if 'x' in steepest:
err_1 = diff_y2 - abs_diff_x
err_2 = diff_z2 - abs_diff_x
for i in np.arange(abs_diff_x - 1):
if (err_1 > 0):
y += incr_y
err_1 -= diff_x2
if (err_2 > 0):
z += incr_z
err_2 -= diff_x2
err_1 += diff_y2
err_2 += diff_z2
x += incr_x
z_value += incr_z_value
visibility = z_value > raster_array[x, y]
if not visibility:
break
if 'y' in steepest:
err_1 = diff_x2 - abs_diff_y
err_2 = diff_z2 - abs_diff_y
for i in np.arange(abs_diff_y - 1):
if (err_1 > 0):
x += incr_x
err_1 -= diff_y2
if (err_2 > 0):
z += incr_z
err_2 -= diff_y2
err_1 += diff_x2
err_2 += diff_z2
y += incr_y
z_value += incr_z_value
visibility = z_value > raster_array[x, y]
if not visibility:
break
if 'z' in steepest:
err_1 = diff_y2 - abs_diff_z
err_2 = diff_x2 - abs_diff_z
for i in np.arange(abs_diff_z - 1):
if (err_1 > 0):
y += incr_y
err_1 -= diff_z2
if (err_2 > 0):
x += incr_x
err_2 -= diff_z2
err_1 += diff_y2
err_2 += diff_x2
z += incr_z
z_value += incr_z_value
visibility = z_value > raster_array[x, y]
if not visibility:
break
lines_for_shp.append(build_return_package(observer, target, visibility))
return lines_for_shp | d11cfdf5c9d8f33b86a7dfc1026e6c668aebd3a4 | 9,986 |
import glob
import os
def delete():
"""
Receives requests for deleting certain files at the back-end
"""
path = request.form['path']
files = glob.glob(path)
for f in files:
os.remove(f)
    return 'Successful' | 1ea2eb7dece5b3a1148b48b057a9e28780f52ce1 | 9,987
from datetime import datetime
from textwrap import dedent
import requests
from lxml import etree, html
from lxml.cssselect import CSSSelector
def retrieve(last_updated=datetime.now()):
""" Crawls news and returns a list of tweets to publish. """
print('Retrieving {} alzheimer news since {}.'.format(SITE, last_updated))
to_ret = list()
# Get all the content from the last page of the site's news
tree = html.fromstring(requests.get(URL).content)
# Get list of articles
articles = CSSSelector('article')(tree)
for article in articles:
# For each article parse the date on the metadata and compare to the last update of the bot.
# If the article is newer it should go on until it finds one that's not
link = CSSSelector('article .ue-c-cover-content__link')(article)[0].get('href')
if "promo" in link.lower() or "follow" in link.lower():
continue
news_page = html.fromstring(requests.get(link).content)
news_date = CSSSelector('time')(news_page)[0].get('datetime')
news_datetime = datetime.strptime(news_date, '%Y-%m-%dT%H:%M:%SZ')
if news_datetime < last_updated:
break
# Get the useful parts of each article to compose a tweet.
title = CSSSelector('article .ue-c-cover-content__headline')(article)[0].text
author = CSSSelector('.ue-c-article__byline-name a, .ue-c-article__byline-name')(news_page)[0].text
article_body = str(etree.tostring(CSSSelector('.ue-l-article__body')(news_page)[0]))
if "alzheimer" not in article_body.lower():
continue
# Compose a tweet with the article's information
tweet = """
{title}
Autor/a: {author}
Enlace: https:{link} ({site})
""".format(title=title, author=author, link=link, site=SITE)
to_ret.append(dedent(tweet))
# Returns a list of tweets ready for the bot to tweet.
return to_ret | ba194b84a50164ca8a238a77d0e80d5e80c93ae2 | 9,988 |
import binascii
def check_seal(item):
"""
Given a message object, use the "seal" attribute - a cryptographic
signature to prove the provenance of the message - to check it is valid.
Returns a boolean indication of validity.
"""
try:
item_dict = to_dict(item)
raw_sig = item_dict['seal']
signature = binascii.unhexlify(raw_sig.encode('ascii'))
key = rsa.PublicKey.load_pkcs1(item_dict['sender'].encode('ascii'))
del item_dict['seal']
del item_dict['message']
root_hash = _get_hash(item_dict).hexdigest()
return rsa.verify(root_hash.encode('ascii'), signature, key)
except:
pass
return False | 86bb7b22d2efe4e7117b3c65fad2a7dc4853b428 | 9,989 |
def plot_win_prob(times, diff, end_lim, probs, team_abr, bools):
""" This function plots the win probability and
score differential for the game
@param times (list): list containing actual_times
and times. times contains all of the times at
which win probability was calculated
@param diff (list): List of score differentials
corresponding to all times in actual_times
@param end_lim (int): Time at which the last win
probability value is calculated
@param probs (list): List of win probability
lists (probs_home and probs_away). probs_home
contains all of the home win probability
values for all times in the times list.
probs_away is the same, but for win probability
for the away team
@param team_abr (list): List contraining the
home team abbreviation in the first index
and the away team abbreviation in the
second index
@param bools (list): List of booleans controlling
which figures are plotted
Returns:
- fig (matplotlib.figure.Figure): Figure
containing score differential and/or
win probability. None if all of the
booleans are False
"""
actual_times, times = times
probs_home, probs_away = probs
plot_diff, plot_home, plot_away = bools
plt.rcParams["figure.figsize"] = (20,6)
# Score differential
if plot_diff:
fig, pltting = \
plot_score_differential(actual_times,
diff,
end_lim)
else:
fig,ax = plt.subplots()
pltting = ax
# Quarter deliniation
for normal_q in range(0,4):
pltting.plot([2880-normal_q*12*60, 2880-normal_q*12*60],
[0,1], 'gray')
# OT deliniation
for ot in range(0,10):
pltting.plot([-ot*5*60, -ot*5*60],
[0,1], 'gray')
# Win probability
if plot_home:
pltting.plot(times, probs_home, 'blue', label=team_abr[0])
if plot_away:
pltting.plot(times, probs_away, 'orange', label=team_abr[-1])
pltting.set_xlim(2880, end_lim)
pltting.set_ylim(0.0, 1.0)
pltting.set_title("Win Probability")
plt.legend(loc='best')
plt.show()
return fig | 139906b4a2db3cf3a7ffa531ec0701be0d395b13 | 9,990 |
def add_office():
"""Given that i am an admin i should be able to add a political office
When i visit ../api/v2/offices endpoint using POST method"""
if is_admin() is not True:
return is_admin()
errors = []
try:
if not request.get_json(): errors.append(
make_response(jsonify({'status': 409, "message": "missing input data"}), 409))
office_data = request.get_json()
check_missingfields = validate.missing_value_validator(['name', 'type'], office_data)
if check_missingfields is not True:
return check_missingfields
check_emptyfield = validate.empty_string_validator(['name', 'type'], office_data)
if check_emptyfield is not True:
return check_emptyfield
check_if_text_only = validate.text_arrayvalidator(['name', 'type'], office_data)
if check_if_text_only is not True:
return check_if_text_only
office_name = office_data['name']
office_type = office_data['type']
if len(errors) > 0:
for e in errors:
return e
res = office.add_office(office_name, office_type)
return res
except Exception as e:
return make_response(jsonify({'message': "something went wrong " + str(e.args[0]), 'status': 400}), 400) | ee990cb55ca819a1b4fdd2eed4346f7fca21a7c3 | 9,991 |
import typing as t
from email.utils import getaddresses
from pathlib import Path
from uuid import uuid4
import click
def send_message(
mg: mailgun.MailGun,
templates: t.Tuple[str, str],
contact_name: str,
contact_email: str,
sender: str,
reply_to: str,
sponsorship_package: t.Optional[Path],
dry_run: bool,
) -> bool:
"""
Send an individual email and report if it was successful
:param mg: the MailGun instance
:param templates: the text and html templates respectively
:param contact_name: the name of the contact at the company
:param contact_email: the email of the contact at the company
:param sender: the name of the person sending the email
:param reply_to: the email which replies are directed to
:param sponsorship_package: an optional file for the sponsorship package
:param dry_run: whether to actually send the email
:return: whether the sending was successful
"""
text, html = templates
# Format the sender email
sender_email = f"{sender[0]}{sender[sender.index(' ') + 1:].replace('-', '')}@{mg.domain}".lower()
# Get and format the contact email(s)
pairs = getaddresses([contact_email.replace(" ", "")])
emails = []
for _, email in pairs:
if email == "":
logger.error(f'invalid email address found in "{contact_email}"')
return False
emails.append(f"{contact_name} <{email.lower()}>")
# Print out the content on dry runs
if dry_run:
click.echo(
f"To: {', '.join(emails)}\n"
f"From: {sender} <{sender_email}>\n"
f"Subject: WaffleHacks Sponsorship Opportunity\n"
f"Reply To: {reply_to}\n\n\n"
f"{text}",
file=open(f"./dry-run-out/{contact_name} - {uuid4()}", "w"),
)
return True
try:
# Open the package if necessary
files = []
if sponsorship_package:
files.append(sponsorship_package.open("rb"))
mg.send(
from_=f"{sender} <{sender_email}>",
to=emails,
subject="WaffleHacks Sponsorship Opportunity",
text=text,
html=html,
files=files,
headers={"Reply-To": reply_to},
)
except mailgun.MailGunException as e:
logger.error(f"failed to send message: {e}")
return False
return True | 62d03de5fa7a3c579ff2351e2c4623b3bf0e8a8e | 9,992 |
def multi_class5_classification_dataset_sparse_labels() -> tf.data.Dataset:
"""
TensorFlow dataset instance with multi-class sparse labels (5 classes)
:return: Multi-class sparse (labels) classification dataset
"""
# Create features
X = tf.random.normal(shape=(100, 3))
    # Create sparse integer class labels in [0, 5)
y = tf.random.uniform(minval=0, maxval=5, dtype=tf.int32, shape=(100,))
return tf.data.Dataset.from_tensor_slices((X, y)) | 05bd5f809e08fde21270c286351ed32b9ed2cb97 | 9,993 |
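# Usage sketch: batch the dataset and inspect one batch's shapes (assumes TensorFlow
# is imported as `tf`, as the function above requires).
_ds = multi_class5_classification_dataset_sparse_labels().batch(16)
for _features, _labels in _ds.take(1):
    print(_features.shape, _labels.shape)  # (16, 3) and (16,)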
def skipIfNAN(proteinPath):
""" Test if there is a NAN (not a number) in the lists """
overlapArrayWhole = None
overlapArrayInterface = None
overlapTApproxWhole = None
overlapTApproxInterface = None
try:
overlapArrayWhole = np.loadtxt(proteinPath+"overlapArrayWhole.txt")
except IOError:
pass
try:
overlapArrayInterface = np.loadtxt(proteinPath+"overlapArrayInterface.txt")
except IOError:
pass
try:
overlapTApproxWhole = np.loadtxt(proteinPath+"overlapTApproxWhole.txt")
except IOError:
pass
try:
overlapTApproxInterface = np.loadtxt(proteinPath+"overlapTApproxInterface.txt")
except IOError:
pass
if overlapArrayWhole is not None and np.isnan(overlapArrayWhole).any():
print "skipped"
return True
if overlapArrayInterface is not None and np.isnan(overlapArrayInterface).any():
print "skipped"
return True
if overlapTApproxWhole is not None and np.isnan(overlapTApproxWhole).any():
print "skipped"
return True
if overlapTApproxInterface is not None and np.isnan(overlapTApproxInterface).any():
print "skipped"
return True
return False | 0993fe55879e2c965b9856435e38f3a33d803e33 | 9,994 |
import json
def alignment_view(request, project_uid, alignment_group_uid):
"""View of a single AlignmentGroup.
"""
project = get_object_or_404(Project, owner=request.user.get_profile(),
uid=project_uid)
alignment_group = get_object_or_404(AlignmentGroup,
reference_genome__project=project, uid=alignment_group_uid)
# Initial javascript data.
init_js_data = json.dumps({
'project': adapt_model_instance_to_frontend(project),
'alignment_group': adapt_model_instance_to_frontend(alignment_group)
})
context = {
'project': project,
'tab_root': TAB_ROOT__DATA,
'alignment_group': alignment_group,
'experiment_sample_to_alignment_list_json': adapt_model_to_frontend(
ExperimentSampleToAlignment,
{'alignment_group': alignment_group}),
'init_js_data': init_js_data,
'flag_genome_finishing_enabled': int(settings.FLAG__GENOME_FINISHING_ENABLED)
}
return render(request, 'alignment.html', context) | 50f9420dca7c939524e1e243d667ddd76d7687d0 | 9,995 |
from typing import Optional
async def get_latest_disclosure(compass_id: int, api: ci.CompassInterface = Depends(ci_user)) -> Optional[ci.MemberDisclosure]:
"""Gets the latest disclosure for the member given by `compass_id`."""
logger.debug(f"Getting /{{compass_id}}/latest-disclosure for {api.user.membership_number}")
async with error_handler:
return api.people.latest_disclosure(compass_id) | d5cce38ecce559b9bd9ac8306f88e32da1d2b4c6 | 9,996 |
def get_matching_string(matches, inputText, limit=0.99):
"""Return the matching string with all of the license IDs matched with the input license text if none matches then it returns empty string.
Arguments:
matches {dictionary} -- Contains the license IDs(which matched with the input text) with their respective sorensen dice score as valus.
limit {float} -- limit at which we will consider the match as a perfect match.
inputText {string} -- license text input by the user.
Returns:
string -- matching string containing the license IDs that actually matched else returns empty string.
"""
if not matches:
matchingString = 'There is not enough confidence threshold for the text to match against the SPDX License database.'
return matchingString
elif 1.0 in matches.values() or all(limit < score for score in matches.values()):
matchingString = 'The following license ID(s) match: ' + ", ".join(matches.keys())
return matchingString
else:
for licenseID in matches:
listedLicense = getListedLicense(licenseID)
isTextStandard = checkTextStandardLicense(listedLicense, inputText)
if not isTextStandard:
matchingString = 'The following license ID(s) match: ' + licenseID
return matchingString
else:
return '' | be0fe152e530ec8244f892bfb4887b78bf89027b | 9,997 |
def get_review(annotation):
"""
Get annotation's review (if exists).
"""
try:
review = Comment.objects.get(annotation=annotation)
return review
except Comment.DoesNotExist:
return None | 89aeee2dc8811c57265ebbc30ede9cfafcd5e696 | 9,998 |
def load_grid(grdfiles, blocks, dimpart, nsigma, **kwargs):
"""Setup a `grid` by reading `grdfiles` on `blocks`
"""
ncgrid = nct.MDataset(grdfiles, blocks, dimpart, **kwargs)
# dummy time, to be updated later
time = ma.Marray(np.arange(10), dims=tdims)
lat = nct.readmarray(ncgrid, "lat_rho", hdims)
lon = nct.readmarray(ncgrid, "lon_rho", hdims)
if ncgrid.halow > 0:
halow = ncgrid.halow
# extend lon-lat on the halow=1 to infer correctly @ f-points
fill_halo(lat, halow)
fill_halo(lon, halow)
depth = nct.readmarray(ncgrid, "h", hdims)
angle = nct.readmarray(ncgrid, "angle", hdims)
mask = nct.readmarray(ncgrid, "mask_rho", hdims)
pm = nct.readmarray(ncgrid, "pm", hdims)
pn = nct.readmarray(ncgrid, "pn", hdims)
f = nct.readmarray(ncgrid, "f", hdims)
sigma = ma.Marray((np.arange(nsigma)+0.5)/nsigma, dims=vdims)
coords = {"t": time, "sigma": sigma, "eta": lat, "xi": lon}
return gr.Grid(coords, dims, depth=depth, angle=angle, mask=mask, pm=pm, pn=pn, f=f, **kwargs) | 08d102c4a1ef163e2af4801d7ffe2b572b747a58 | 9,999 |