content | sha1 | id
---|---|---|
def parse_c45(file_base, rootdir='.'):
"""
    Returns an ExampleSet parsed from the C4.5-formatted
    schema (.names) and data (.data) files for `file_base`.
"""
schema_name = file_base + NAMES_EXT
data_name = file_base + DATA_EXT
schema_file = find_file(schema_name, rootdir)
if schema_file is None:
raise ValueError('Schema file not found')
data_file = find_file(data_name, rootdir)
if data_file is None:
raise ValueError('Data file not found')
return _parse_c45(schema_file, data_file) | 14d651a48c2fe65ad68c441722c4c39854efef2a | 14,535 |
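A hypothetical invocation, assuming the module's NAMES_EXT/DATA_EXT constants and find_file/_parse_c45 helpers are defined elsewhere; 'mushroom' and './data' are illustrative names only:

# Illustrative only: assumes mushroom.names / mushroom.data exist under ./data
examples = parse_c45('mushroom', rootdir='./data')
print(len(examples))  # number of parsed examples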
import torch
def to_numpy(tensor: torch.Tensor):
"""
Convert a PyTorch Tensor to a Numpy Array.
"""
if tensor is None:
return tensor
if tensor.is_quantized:
tensor = tensor.dequantize()
return tensor.cpu().detach().contiguous().numpy() | ed6bd50ef5db30b3df1304a0152998f2f27750c6 | 14,536 |
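A quick usage sketch (requires PyTorch):

import torch

t = torch.tensor([[1.0, 2.0], [3.0, 4.0]], requires_grad=True)
arr = to_numpy(t)            # detached, contiguous NumPy copy on the CPU
print(arr.shape, arr.dtype)  # (2, 2) float32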
from flask_sqlalchemy import _wrap_with_default_query_class, SQLAlchemy
from sqlathanor import Column, relationship
def initialize_flask_sqlathanor(db):
"""Initialize **SQLAthanor** contents on a `Flask-SQLAlchemy`_ instance.
:param db: The
:class:`flask_sqlalchemy.SQLAlchemy <flask_sqlalchemy:flask_sqlalchemy.SQLAlchemy>`
instance.
:type db: :class:`flask_sqlalchemy.SQLAlchemy <flask_sqlalchemy:flask_sqlalchemy.SQLAlchemy>`
:returns: A mutated instance of ``db`` that replaces `SQLAlchemy`_ components and
their `Flask-SQLAlchemy`_ flavors with **SQLAthanor** analogs while maintaining
`Flask-SQLAlchemy`_ and `SQLAlchemy`_ functionality and interfaces.
:rtype: :class:`flask_sqlalchemy.SQLAlchemy <flask_sqlalchemy:flask_sqlalchemy.SQLAlchemy>`
:raises ImportError: if called when `Flask-SQLAlchemy`_ is not installed
:raises ValueError: if ``db`` is not an instance of
:class:`flask_sqlalchemy.SQLAlchemy <flask_sqlalchemy:flask_sqlalchemy.SQLAlchemy>`
"""
if not isinstance(db, SQLAlchemy):
raise ValueError('db must be an instance of flask_sqlalchemy.SQLAlchemy')
db.Column = Column
db.relationship = _wrap_with_default_query_class(relationship, db.Query)
return db | 565c010a49e9d0ac2e82b40803b4f0871b526177 | 14,537 |
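A minimal wiring sketch, assuming Flask, Flask-SQLAlchemy, and SQLAthanor are installed; the in-memory URI is illustrative:

from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:'
db = SQLAlchemy(app)
db = initialize_flask_sqlathanor(db)  # db.Column / db.relationship are now SQLAthanor analogs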
import vcf as pyvcf  # PyVCF; `cfg` and `get_command_line` are module-level helpers defined elsewhere

def add_vcf_header(vcf_reader):
"""
Function to add a new field to the vcf header
Input: A vcf reader object
Return: The vcf reader object with new headers added
"""
# Metadata
vcf_reader.metadata['SMuRFCmd'] = [get_command_line()]
# Formats
vcf_reader.formats['VAF'] = pyvcf.parser._Format('VAF',None,'Float','Variant Allele Frequency calculated from the BAM file')
vcf_reader.formats['CAD'] = pyvcf.parser._Format('CAD',None,'Integer','Calculated Allelic Depth, used for VAF calculation')
vcf_reader.formats['FT'] = pyvcf.parser._Format('FT',None,'String','Sample filter')
# Filters
    vcf_reader.filters['KnownVariant'] = pyvcf.parser._Filter('KnownVariant','Variant already has an ID, excluding COSMIC_IDs')
vcf_reader.filters['BadMQ'] = pyvcf.parser._Filter('BadMQ', 'Variant with MQ <'+str(cfg['SMuRF']['mq']))
vcf_reader.filters['BadQual'] = pyvcf.parser._Filter('BadQual','Variant with a QUAL <'+str(cfg['SMuRF']['qual']))
vcf_reader.filters['MultiAllelic'] = pyvcf.parser._Filter('MultiAllelic', 'Variant has multiple alternative alleles')
vcf_reader.filters['BlackList'] = pyvcf.parser._Filter('BlackList', 'Variant exists in a blacklist')
vcf_reader.filters['Indel'] = pyvcf.parser._Filter('Indel','Variant is an indel')
vcf_reader.filters['ControlEvidence'] = pyvcf.parser._Filter('ControlEvidence','Variant is also found in a control based on the GT')
vcf_reader.filters['NoSampleEvidence'] = pyvcf.parser._Filter('NoSampleEvidence','Variant is not found in any of the samples based on the GT')
vcf_reader.filters['AllSamplesFailedQC'] = pyvcf.parser._Filter('AllSamplesFailedQC', 'All samples failed the quality control')
vcf_reader.filters['AllControlsFailedQC'] = pyvcf.parser._Filter('AllControlsFailedQC', 'All controls failed the quality control')
vcf_reader.filters['ControlSubclonal'] = pyvcf.parser._Filter('ControlSubclonal', 'Variant is found as subclonal in a control based on the recalculated VAF')
vcf_reader.filters['ControlClonal'] = pyvcf.parser._Filter('ControlClonal', 'Variant is found as clonal in a control based on the recalculated VAF')
vcf_reader.filters['NoClonalSample'] = pyvcf.parser._Filter('NoClonalSample', 'Variant is not found as clonal in any of the samples based on the recalculated VAF')
# Sample filters
vcf_reader.filters['LowCov'] = pyvcf.parser._Filter('LowCov', 'Variant has a coverage <'+str(cfg['SMuRF']['coverage'])+' in this sample/control')
vcf_reader.filters['NoGenoType'] = pyvcf.parser._Filter('NoGenoType', 'Genotype is empty for this sample/control')
vcf_reader.filters['isRef'] = pyvcf.parser._Filter('isRef', 'Genotype is a reference (i.e. reference 0/0)')
vcf_reader.filters['isVariant'] = pyvcf.parser._Filter('isVariant', 'Genotype is a variant (i.e. not reference 0/0)')
    vcf_reader.filters['LowGQ'] = pyvcf.parser._Filter('LowGQ', 'Variant has a low genotype quality (GQ) for this sample/control')
# Infos
vcf_reader.infos['ABSENT_SAMPLES'] = pyvcf.parser._Info('ABSENT_SAMPLES',1,'Integer','Number of samples without the variant', None, None)
vcf_reader.infos['SUBCLONAL_SAMPLES'] = pyvcf.parser._Info('SUBCLONAL_SAMPLES',1,'Integer','Number of samples with a subclonal variant', None, None)
vcf_reader.infos['CLONAL_SAMPLES'] = pyvcf.parser._Info('CLONAL_SAMPLES',1,'Integer','Number of samples with a clonal variant', None, None)
vcf_reader.infos['ABSENT_CONTROLS'] = pyvcf.parser._Info('ABSENT_CONTROLS',1,'Integer','Number of controls without the variant', None, None)
vcf_reader.infos['SUBCLONAL_CONTROLS'] = pyvcf.parser._Info('SUBCLONAL_CONTROLS',1,'Integer','Number of controls with a subclonal variant', None, None)
vcf_reader.infos['CLONAL_CONTROLS'] = pyvcf.parser._Info('CLONAL_CONTROLS',1,'Integer','Number of controls with a clonal variant', None, None)
vcf_reader.infos['ABSENT_SAMPLE_NAMES'] = pyvcf.parser._Info('ABSENT_SAMPLE_NAMES',None,'String','Samples without the variant', None, None)
vcf_reader.infos['SUBCLONAL_SAMPLE_NAMES'] = pyvcf.parser._Info('SUBCLONAL_SAMPLE_NAMES',None,'String','Samples with a subclonal variant', None, None)
vcf_reader.infos['CLONAL_SAMPLE_NAMES'] = pyvcf.parser._Info('CLONAL_SAMPLE_NAMES',None,'String','Samples with a clonal variant', None, None)
vcf_reader.infos['ABSENT_CONTROL_NAMES'] = pyvcf.parser._Info('ABSENT_CONTROL_NAMES',None,'String','Controls without the variant', None, None)
vcf_reader.infos['SUBCLONAL_CONTROL_NAMES'] = pyvcf.parser._Info('SUBCLONAL_CONTROL_NAMES',None,'String','Controls with a subclonal variant', None, None)
vcf_reader.infos['CLONAL_CONTROL_NAMES'] = pyvcf.parser._Info('CLONAL_CONTROL_NAMES',None,'String','Controls with a clonal variant', None, None)
vcf_reader.infos['PASS_QC_SAMPLES'] = pyvcf.parser._Info('PASS_QC_SAMPLES',1,'Integer','Number of samples which pass all quality control filters', None, None)
vcf_reader.infos['PASS_QC_CONTROLS'] = pyvcf.parser._Info('PASS_QC_CONTROLS',1,'Integer','Number of controls which pass all quality control filters', None, None)
vcf_reader.infos['FAIL_QC_SAMPLES'] = pyvcf.parser._Info('FAIL_QC_SAMPLES',1,'Integer','Number of samples which failed one or multiple quality control filters', None, None)
vcf_reader.infos['FAIL_QC_CONTROLS'] = pyvcf.parser._Info('FAIL_QC_CONTROLS',1,'Integer','Number of controls which failed one or multiple quality control filters', None, None)
vcf_reader.infos['PASS_QC_SAMPLE_NAMES'] = pyvcf.parser._Info('PASS_QC_SAMPLE_NAMES',None,'String','Samples which pass all quality control filters', None, None)
vcf_reader.infos['PASS_QC_CONTROL_NAMES'] = pyvcf.parser._Info('PASS_QC_CONTROL_NAMES',None,'String','Controls which pass all quality control filters', None, None)
vcf_reader.infos['FAIL_QC_SAMPLE_NAMES'] = pyvcf.parser._Info('FAIL_QC_SAMPLE_NAMES',None,'String','Samples which failed one or multiple quality control filters', None, None)
vcf_reader.infos['FAIL_QC_CONTROL_NAMES'] = pyvcf.parser._Info('FAIL_QC_CONTROL_NAMES',None,'String','Controls which failed one or multiple quality control filters', None, None)
return( vcf_reader ) | 36e5819de6c09c7e60638b183bfe415fc19361db | 14,538 |
def get_seats_percent(election_data):
"""
    This function takes a list of lists as an argument, with each inner list representing a party's election results,
and returns a tuple with the percentage of Bundestag seats won by various political affiliations.
Parameters:
election_data (list): A list of lists, each representing a party's election results
Returns:
A tuple with percentage of Bundestag seats won by various political affiliations
"""
left_seats = 0
right_seats = 0
extreme_seats = 0
center_seats = 0
total_bundestag_seats = 0
    for party in election_data[1:]:
        seats = int(party[1])  # cast once so string seat counts are handled consistently
        total_bundestag_seats += seats
        if 'far' in party[2]:
            extreme_seats += seats
        else:
            center_seats += seats
        if 'left' in party[2]:
            left_seats += seats
        else:
            right_seats += seats
left_percent = round((left_seats / total_bundestag_seats * 100), 2)
right_percent = round((right_seats / total_bundestag_seats * 100), 2)
extreme_percent = round((extreme_seats / total_bundestag_seats * 100), 2)
center_percent = round((center_seats / total_bundestag_seats * 100), 2)
return left_percent, right_percent, extreme_percent, center_percent | a131d64747c5c0dde8511e9ec4da07252f96a6ec | 14,539 |
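A worked example with hypothetical data (the first row is a header and is skipped):

election_data = [
    ['party', 'seats', 'affiliation'],
    ['A', '100', 'center-left'],
    ['B', '120', 'center-right'],
    ['C', '30', 'far-right'],
]
print(get_seats_percent(election_data))  # (40.0, 60.0, 12.0, 88.0)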
import pandas as pd
from nba_api.stats.endpoints import playergamelog

def get_player_gamelog(player_id, season, season_type='Regular Season', timeout=30):
    """
    Collects the game log of a given player in a given season,
    optionally restricted to a specific season type (pre-season,
    regular season, or playoffs).
    Parameters
    ----------
    :param player_id:
        Identifier of the target player
        [type: int]
    :param season:
        Target season for the analysis
        [type: str, example: "2020-21"]
    :param season_type:
        Season type accepted by the endpoint
        [type: str, default='Regular Season']
    :param timeout:
        Maximum wait time for the request.
        [type: int, default=30]
    Returns
    -------
    :return df_gamelog:
        Dataset with specific, detailed information about the player's
        game log. Details on the contents of this returned dataset can
        be found in the official documentation of the playergamelog
        endpoint.
        [type: pd.DataFrame]
    """
    # Retrieve the player's game log
player_gamelog = playergamelog.PlayerGameLog(
player_id=player_id,
season=season,
season_type_all_star=season_type,
timeout=timeout
)
    # Convert the data to a DataFrame and add season information
df_gamelog = player_gamelog.player_game_log.get_data_frame()
df_gamelog['SEASON'] = season
df_gamelog['SEASON_TYPE'] = season_type
    # Convert the date column
df_gamelog['GAME_DATE'] = pd.to_datetime(df_gamelog['GAME_DATE'])
df_gamelog.columns = [col.lower().strip() for col in df_gamelog.columns]
return df_gamelog | d21132df8a4f72055f37ef7039427f42ce03610e | 14,540 |
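A hypothetical call (requires the nba_api package and network access; the player id shown is illustrative, not verified):

df = get_player_gamelog(player_id=203999, season='2020-21')
print(df[['game_date', 'matchup', 'pts']].head())  # columns are lowercased by the function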
import re
from xml.dom import minidom
def unbound_text_to_html5(text, language=None):
"""
Converts the provided text to HTML5 custom data attributes.
Usage:
{{text|unbound_text_to_html5:"Greek"}}
"""
# If the language is English, then don't bother doing anything
if language is not None and language.lower() == "english":
return text
# Make the document that will contain the verse
converted_doc = minidom.Document()
# Make the verse node to attach the content to
verse_node = converted_doc.createElement( "span" )
verse_node.setAttribute("class", "verse")
    # Append the verse node to the document
converted_doc.appendChild(verse_node)
# Split up the text and place the text segments in nodes
    segments = re.findall(r"[\s]+|[\[\],.:;]|[^\s\[\],.:;]+", text)
for s in segments:
# Don't wrap punctuation in a word node
if s in [";", ",", ".", "[", "]", ":"] or len(s.strip()) == 0:
txt_node = converted_doc.createTextNode(s)
verse_node.appendChild(txt_node)
else:
word_node = converted_doc.createElement( "span" )
word_node.setAttribute( "class", "word" )
# Create the text node and append it
if language is None or language.lower() == "greek":
txt_node = converted_doc.createTextNode(s)
else:
txt_node = converted_doc.createTextNode(transform_text(s, language))
word_node.appendChild(txt_node)
# Append the node
verse_node.appendChild(word_node)
return converted_doc.toxml( encoding="utf-8" ) | 1fe50c4844e126395c1aa1e8d5ba464217003557 | 14,541 |
import numpy as np

def sort_points(points):
"""Sorts points first by argument, then by modulus.
Parameters
----------
points : array_like
(n_points, 3)
The points to be sorted: (x, y, intensity)
Returns
-------
points_sorted : :class:`numpy.ndarray`
(n_points, 3)
The sorted points.
"""
positions = points[:, :2].astype(float)
with np.errstate(invalid='ignore', divide='ignore'):
tangents = np.nan_to_num(positions[:, 1]/positions[:, 0])
arguments = np.arctan(tangents)
moduli = np.sqrt(np.sum(np.square(positions), axis=1))
inds = np.lexsort((moduli, arguments))
points_sorted = points[inds]
return points_sorted | 3d5ae7cdfa33abba906cefaf3cd1ab0ab5899e32 | 14,542 |
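A small usage sketch:

pts = np.array([[2.0, 1.0, 9.0],
                [1.0, 1.0, 5.0],
                [3.0, 0.0, 7.0]])
print(sort_points(pts))
# rows ordered primarily by angle arctan(y/x), then by distance from the origin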
def get_pairs(scores):
"""
Returns pairs of indexes where the first value in the pair has a higher score than the second value in the pair.
    Parameters
    ----------
    scores : list of lists of numbers
        Each inner list contains the scores for one query.
    Returns
    -------
    query_pair : list of lists of pairs
        For each query, the index pairs (i, j) into the descending-sorted
        scores where the value at position i exceeds the value at position j.
    """
query_pair = []
for query_scores in scores:
temp = sorted(query_scores, reverse=True)
pairs = []
for i in range(len(temp)):
for j in range(len(temp)):
if temp[i] > temp[j]:
pairs.append((i,j))
query_pair.append(pairs)
return query_pair | 1d4bf17dffb7ec8b934701254448e5a7dfe41cf9 | 14,543 |
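A worked example:

print(get_pairs([[3, 1, 2]]))
# [[(0, 1), (0, 2), (1, 2)]] -- indexes into the descending-sorted scores [3, 2, 1]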
from flask import current_app, jsonify, request

def make():
"""Make a new migration.
Returns:
Response: json status message
"""
response = None
try:
with capture_print(escape=True) as content:
current_app.config.get('container').make('migrator').make(request.form['name'])
response = {'message': content.get_text(), 'status': 'success'}
except SystemExit:
response = {'message': content.get_text(), 'status': 'error'}
return jsonify(response) | 24524cc1906f621e9e927d3e0b7265b65ab8ebe5 | 14,544 |
def generate_hmac(str_to_sign, secret):
"""Signs the specified string using the specified secret.
Args:
str_to_sign : string, the string to sign
secret : string, the secret used to sign
Returns:
signed_message : string, the signed str_to_sign
"""
    # str() on encoded bytes would embed the "b'...'" repr in the shell
    # command and produce a wrong digest; pass the strings through directly.
    cmd = ['echo -n "' + str_to_sign + '" | openssl dgst -sha256 -binary -hmac "' + secret + '"']
    process, signed_message, error = linuxutil.popen_communicate(cmd, shell=True)
if process.returncode != 0:
raise Exception("Unable to generate signature. " + str(error))
return signed_message | c773cb1f470f0f52934758e7f66fe01047419cbd | 14,548 |
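For reference, the standard library computes the same SHA-256 HMAC without shelling out; a minimal in-process sketch:

import hashlib
import hmac

def generate_hmac_stdlib(str_to_sign, secret):
    """Same digest as the openssl pipeline, computed with hmac/hashlib."""
    return hmac.new(secret.encode('utf-8'),
                    str_to_sign.encode('utf-8'),
                    hashlib.sha256).digest()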
import numpy as np
from scipy import interpolate

def interpolate_scores(coords: np.ndarray, scores: np.ndarray, coord_range: tuple, step: float = 0.001) -> np.ndarray:
"""
Given a coord_range and values for specific coords - interpolate to the rest of the grid
Args:
coords: array of lons and lats of points that their values are known
scores: array of the coords values
coord_range: range of the desired grid
step: resolution of sample
Returns:
z: np.array - 2D array of the values in the entire grid of coord_range
"""
min_lon, min_lat, max_lon, max_lat = coord_range
x = np.arange(min_lon, max_lon, step=step)
y = np.arange(min_lat, max_lat, step=step)
grid_x, grid_y = np.meshgrid(x, y)
z = interpolate.griddata(coords, scores, (x[None, :], y[:, None]), method='linear')
return z | 2ed5660191f01018344e9b55af372f10133bc6a9 | 14,549 |
def remove_blank_from_dict(data):
"""Optimise data from default outputted dictionary"""
if isinstance(data, dict):
return dict(
(key, remove_blank_from_dict(value))
for key, value in data.items()
if is_not_blank(value) and is_not_blank(remove_blank_from_dict(value))
)
if isinstance(data, list):
return [
remove_blank_from_dict(value)
for value in data
if is_not_blank(value) and is_not_blank(remove_blank_from_dict(value))
]
return data | ae77d0b5b9a1cffdd1832df3a5513cc79e600138 | 14,550 |
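A usage sketch with a hypothetical is_not_blank helper (the real helper is defined elsewhere in the module):

def is_not_blank(value):
    """Hypothetical helper: treat None, '' and empty containers as blank."""
    return value not in (None, '', [], {})

data = {'a': 1, 'b': '', 'c': {'d': None, 'e': 'x'}, 'f': []}
print(remove_blank_from_dict(data))  # {'a': 1, 'c': {'e': 'x'}}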
def merge_mosaic_images(mosaic_dict, mosaic_images, orig_images, Y_orig=None):
""" Merge the list of mosaic images with all original images.
Args:
mosaic_dict: Dictionary specifying how mosaic images were created, returned from make_mosaic
mosaic_images: List of all mosaic images returned from make_mosaic
orig_images: List of all images, some (or all, or none) of which were used to generate the mosaic images
Y_orig: If building mosaic images for training, the Y/expected images corresponding to orig_images
Returns:
3 lists - merged_images, merged_sizes, merged_Y (empty list if Y_orig was not provided). This list of
images can then be resized, windowed, etc., and provided as input images for training or predictions.
To split the merged list back into the separate portions, use split_merged_mosaic.
"""
orig_index = list(range(0, len(orig_images)))
merged_images = []
merged_sizes = []
merged_Y = []
# If Y/expected values are desired, construct the merged Y
# images to correspond with the mosaic images.
if Y_orig:
for k, v in mosaic_dict.items():
merged_Y.append(combine_images(Y_orig, v))
# Mosaic images are output first
for img in mosaic_images:
merged_images.append(img)
merged_sizes.append([img.shape[0], img.shape[1]])
    mosaic_all_ix = []
    for v in mosaic_dict.values():
        mosaic_all_ix.extend(v)
leftovers = [x for x in orig_index if x not in mosaic_all_ix]
# And then output all images that are not part of a larger mosaic image
for ix in leftovers:
leftover_img = orig_images[ix]
merged_images.append(leftover_img)
merged_sizes.append([leftover_img.shape[0], leftover_img.shape[1]])
if Y_orig:
merged_Y.append(Y_orig[ix])
return (merged_images, merged_sizes, merged_Y) | d16875462c09b671db785ec101eb09028b1a7cbe | 14,553 |
def show2D(dd, impixel=None, im=None, fig=101, verbose=1, dy=None, sigma=None, colorbar=False, title=None, midx=2, units=None):
""" Show result of a 2D scan
Args:
dd (DataSet)
impixel (array or None)
im (array or None)
"""
if dd is None:
return None
extent, g0, g1, vstep, vsweep, arrayname = dataset2Dmetadata(dd)
tr = image_transform(dd, mode='pixel')
array = getattr(dd, arrayname)
    if impixel is None:
        if im is None:
            im = np.array(array)
        # transform whichever image we have into pixel coordinates
        impixel = tr._transform(im)
labels = [s.name for s in array.set_arrays]
    xx = tr.matplotlib_image_extent()
ny = vstep.size
nx = vsweep.size
im = qtt.utilities.tools.diffImageSmooth(impixel, dy=dy, sigma=sigma)
if verbose:
print('show2D: nx %d, ny %d' % (nx, ny,))
if verbose >= 2:
print('extent: %s' % xx)
if units is None:
unitstr = ''
else:
unitstr = ' (%s)' % units
if fig is not None:
scanjob = dd.metadata.get('scanjob', dict())
pgeometry.cfigure(fig)
plt.clf()
if impixel is None:
if verbose >= 2:
print('show2D: show raw image')
plt.pcolormesh(vstep, vsweep, im)
else:
if verbose >= 2:
print('show2D: show image')
plt.imshow(impixel, extent=xx, interpolation='nearest')
        labelx = labels[1]
        labely = labels[0]
        if scanjob.get('sweepdata', None) is not None:
            labelx = sweepgate(scanjob)
        plt.xlabel('%s' % labelx + unitstr)
if scanjob.get('stepdata', None) is not None:
if units is None:
plt.ylabel('%s' % stepgate(scanjob))
else:
plt.ylabel('%s (%s)' % (stepgate(scanjob), units))
        if title is not None:
plt.title(title)
if colorbar:
plt.colorbar()
if verbose >= 2:
print('show2D: at show')
try:
plt.show(block=False)
except:
# ipython backend does not know about block keyword...
plt.show()
return xx, vstep, vsweep | d9560e2dd54ed8daf8450e2db8e1f5d8357a601c | 14,554 |
def get_job(api_key, jq_id):
"""
Fetch a job and its status
:param api_key: user id of the client
:param jq_id: job queue id
    :return: the job and its status
"""
if Auth.verify_auth_key(api_key):
if Auth.verify_job(api_key, jq_id):
return trigger.get_job(jq_id)
return abort(400) | f323386e530e354f52bcbcc6301c5fb1af4e4767 | 14,555 |
import numpy as np

def contour_to_valid(cnt, image_shape):
"""Convert rect to xys, i.e., eight points
    The `image_shape` is used to make sure all returned points are valid, i.e., within the image area
"""
# rect = cv2.minAreaRect(cnt)
    assert len(cnt.shape) == 3, 'expected an OpenCV contour of shape (n_points, 1, 2)'
rect = cnt.reshape([cnt.shape[0], cnt.shape[2]])
h, w = image_shape[0:2]
def get_valid_x(x):
if x < 0:
return 0
if x >= w:
return w - 1
return x
def get_valid_y(y):
if y < 0:
return 0
if y >= h:
return h - 1
return y
for i_xy, (x, y) in enumerate(rect):
x = get_valid_x(x)
y = get_valid_y(y)
rect[i_xy, :] = [x, y]
points = np.reshape(rect, -1)
return points | a4f85d77c0805903b220d3670edc5db05ea001ed | 14,556 |
def search(datafile, query, bool_operator):
"""
Queries on a set of documents.
:param datafile: The location of the datafile as a pathlib.Path
:param query: the query text
:param bool_operator: the operator. Must be one of [OR, AND]
:return: the list of indexes matching the search criteria
"""
    # normalize the query string to a set of unique lowercase words
query = _extract_words(query)
# we read the datafile
data = datafile.readlines()
# calculating results
results = [str(i) for i, text in enumerate(data)
if (query.issubset(_extract_words(text))
if bool_operator == 'AND'
else bool(query.intersection(_extract_words(text))))]
return results | 6f8ec35063178e49a557de1849363568561638ed | 14,557 |
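A usage sketch with an in-memory file, assuming a hypothetical _extract_words that returns the unique lowercase words of a text (the real helper is defined elsewhere):

import io

def _extract_words(text):
    """Hypothetical helper: unique lowercase words of a text."""
    return set(text.lower().split())

datafile = io.StringIO("the quick brown fox\nlazy dog\nquick dog")
print(search(datafile, "quick dog", "OR"))   # ['0', '1', '2']
datafile.seek(0)
print(search(datafile, "quick dog", "AND"))  # ['2']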
from typing import Optional
from typing import List
def recurse_structures(
structure: Component,
ignore_components_prefix: Optional[List[str]] = None,
ignore_functions_prefix: Optional[List[str]] = None,
) -> DictConfig:
"""Recurse over structures"""
ignore_functions_prefix = ignore_functions_prefix or []
ignore_components_prefix = ignore_components_prefix or []
if (
hasattr(structure, "function_name")
and structure.function_name in ignore_functions_prefix
):
return DictConfig({})
if hasattr(structure, "name") and any(
[structure.name.startswith(i) for i in ignore_components_prefix]
):
return DictConfig({})
output = {structure.name: structure.info}
    for element in structure.references:
        if (
            isinstance(element, ComponentReference)
            and element.ref_cell.name not in output
        ):
            # propagate the ignore lists so nested references honour them too
            output.update(
                recurse_structures(
                    element.ref_cell,
                    ignore_components_prefix=ignore_components_prefix,
                    ignore_functions_prefix=ignore_functions_prefix,
                )
            )
return output | 696e04a0184ebb7b8d2e0789e711a676c12ed89c | 14,558 |
from spinalcordtoolbox.image import Image
import dipy.reconst.dti as dti
import dipy.denoise.noise_estimate as ne
from dipy.core.gradients import gradient_table
from dipy.io import read_bvals_bvecs
# `sct` and `param` are module-level objects from the surrounding Spinal Cord Toolbox script
def compute_dti(fname_in, fname_bvals, fname_bvecs, prefix, method, evecs, file_mask):
"""
Compute DTI.
:param fname_in: input 4d file.
    :param fname_bvals: bvals txt file
    :param fname_bvecs: bvecs txt file
    :param prefix: output prefix. Example: "dti_"
    :param method: algo for computing dti ('standard' or 'restore')
    :param evecs: bool: output diffusion tensor eigenvectors
    :param file_mask: binary mask file; empty string to skip masking
:return: True/False
"""
# Open file.
nii = Image(fname_in)
data = nii.data
sct.printv('data.shape (%d, %d, %d, %d)' % data.shape)
# open bvecs/bvals
bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs)
gtab = gradient_table(bvals, bvecs)
# mask and crop the data. This is a quick way to avoid calculating Tensors on the background of the image.
if not file_mask == '':
sct.printv('Open mask file...', param.verbose)
# open mask file
nii_mask = Image(file_mask)
mask = nii_mask.data
# fit tensor model
sct.printv('Computing tensor using "' + method + '" method...', param.verbose)
if method == 'standard':
tenmodel = dti.TensorModel(gtab)
if file_mask == '':
tenfit = tenmodel.fit(data)
else:
tenfit = tenmodel.fit(data, mask)
elif method == 'restore':
sigma = ne.estimate_sigma(data)
dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
if file_mask == '':
tenfit = dti_restore.fit(data)
else:
tenfit = dti_restore.fit(data, mask)
# Compute metrics
sct.printv('Computing metrics...', param.verbose)
# FA
nii.data = tenfit.fa
nii.save(prefix + 'FA.nii.gz', dtype='float32')
# MD
nii.data = tenfit.md
nii.save(prefix + 'MD.nii.gz', dtype='float32')
# RD
nii.data = tenfit.rd
nii.save(prefix + 'RD.nii.gz', dtype='float32')
# AD
nii.data = tenfit.ad
nii.save(prefix + 'AD.nii.gz', dtype='float32')
if evecs:
data_evecs = tenfit.evecs
# output 1st (V1), 2nd (V2) and 3rd (V3) eigenvectors as 4d data
for idim in range(3):
nii.data = data_evecs[:, :, :, :, idim]
nii.save(prefix + 'V' + str(idim+1) + '.nii.gz', dtype="float32")
return True | 149cfbc3f4fa2f3c1a33e4d4e6ee09983176e1b4 | 14,559 |
import time as ticker

import numpy as np
from tqdm import tqdm

import liionpack as lp
def solve(
netlist=None,
parameter_values=None,
experiment=None,
I_init=1.0,
htc=None,
initial_soc=0.5,
nproc=12,
output_variables=None,
):
"""
Solves a pack simulation
Parameters
----------
netlist : pandas.DataFrame
A netlist of circuit elements with format. desc, node1, node2, value.
Produced by liionpack.read_netlist or liionpack.setup_circuit
parameter_values : pybamm.ParameterValues class
A dictionary of all the model parameters
experiment : pybamm.Experiment class
The experiment to be simulated. experiment.period is used to
determine the length of each timestep.
I_init : float, optional
Initial guess for single battery current [A]. The default is 1.0.
htc : float array, optional
Heat transfer coefficient array of length Nspm. The default is None.
initial_soc : float
The initial state of charge for every battery. The default is 0.5
nproc : int, optional
Number of processes to start in parallel for mapping. The default is 12.
output_variables : list, optional
Variables to evaluate during solve. Must be a valid key in the
model.variables
Raises
------
Exception
DESCRIPTION.
Returns
-------
output : ndarray shape [# variable, # steps, # batteries]
simulation output array
"""
if netlist is None or parameter_values is None or experiment is None:
raise Exception("Please supply a netlist, paramater_values, and experiment")
# Get netlist indices for resistors, voltage sources, current sources
Ri_map = netlist["desc"].str.find("Ri") > -1
V_map = netlist["desc"].str.find("V") > -1
I_map = netlist["desc"].str.find("I") > -1
Terminal_Node = np.array(netlist[I_map].node1)
Nspm = np.sum(V_map)
# Generate the protocol from the supplied experiment
protocol = lp.generate_protocol_from_experiment(experiment)
dt = experiment.period
Nsteps = len(protocol)
# Solve the circuit to initialise the electrochemical models
V_node, I_batt = lp.solve_circuit(netlist)
# Create battery simulation and update initial state of charge
sim = lp.create_simulation(parameter_values, make_inputs=True)
lp.update_init_conc(sim, SoC=initial_soc)
# The simulation output variables calculated at each step for each battery
# Must be a 0D variable i.e. battery wide volume average - or X-averaged for 1D model
variable_names = [
"Terminal voltage [V]",
"Measured battery open circuit voltage [V]",
]
if output_variables is not None:
for out in output_variables:
if out not in variable_names:
variable_names.append(out)
# variable_names = variable_names + output_variables
Nvar = len(variable_names)
# Storage variables for simulation data
shm_i_app = np.zeros([Nsteps, Nspm], dtype=float)
shm_Ri = np.zeros([Nsteps, Nspm], dtype=float)
output = np.zeros([Nvar, Nsteps, Nspm], dtype=float)
# Initialize currents in battery models
shm_i_app[0, :] = I_batt * -1
# Set up integrator
integrator, variables_fn, t_eval = _create_casadi_objects(
I_init, htc[0], sim, dt, Nspm, nproc, variable_names
)
# Step forward in time
time = 0
end_time = dt * Nsteps
step_solutions = [None] * Nspm
V_terminal = []
record_times = []
v_cut_lower = parameter_values["Lower voltage cut-off [V]"]
v_cut_higher = parameter_values["Upper voltage cut-off [V]"]
sim_start_time = ticker.time()
for step in tqdm(range(Nsteps), desc='Solving Pack'):
# Step the individual battery models
step_solutions, var_eval = _mapped_step(
sim.built_model,
step_solutions,
lp.build_inputs_dict(shm_i_app[step, :], htc),
integrator,
variables_fn,
t_eval,
)
output[:, step, :] = var_eval
time += dt
# Calculate internal resistance and update netlist
temp_v = output[0, step, :]
temp_ocv = output[1, step, :]
# temp_Ri = output[2, step, :]
# This could be used instead of Equivalent ECM resistance which has
# been changing definition
temp_Ri = (temp_ocv - temp_v) / shm_i_app[step, :]
# Make Ri more stable
current_cutoff = np.abs(shm_i_app[step, :]) < 1e-6
temp_Ri[current_cutoff] = 1e-12
# temp_Ri = 1e-12
shm_Ri[step, :] = temp_Ri
netlist.loc[V_map, ("value")] = temp_ocv
netlist.loc[Ri_map, ("value")] = temp_Ri
netlist.loc[I_map, ("value")] = protocol[step]
# Stop if voltage limits are reached
if np.any(temp_v < v_cut_lower):
print("Low voltage limit reached")
break
if np.any(temp_v > v_cut_higher):
print("High voltage limit reached")
break
if time <= end_time:
record_times.append(time)
V_node, I_batt = lp.solve_circuit(netlist)
V_terminal.append(V_node[Terminal_Node][0])
if time < end_time:
shm_i_app[step + 1, :] = I_batt[:] * -1
# Collect outputs
all_output = {}
all_output["Time [s]"] = np.asarray(record_times)
all_output["Pack current [A]"] = np.asarray(protocol[: step + 1])
all_output["Pack terminal voltage [V]"] = np.asarray(V_terminal)
all_output["Cell current [A]"] = shm_i_app[: step + 1, :]
for j in range(Nvar):
all_output[variable_names[j]] = output[j, : step + 1, :]
toc = ticker.time()
lp.logger.notice(
"Solve circuit time " + str(np.around(toc - sim_start_time, 3)) + "s"
)
return all_output | fa51e4e69a434e3a3c728b4675844c0bfa29d3fd | 14,560 |
import numpy as np

def global_node_entropy(data, dx=3, dy=1, taux=1, tauy=1, overlapping=True, connections="all", tie_precision=None):
"""
Calculates global node entropy\\ [#pessa2019]_\\ :sup:`,`\\ [#McCullough]_ for an
ordinal network obtained from data. (Assumes directed and weighted edges).
Parameters
----------
data : array, return of :func:`ordpy.ordinal_network`
Array object in the format :math:`[x_{1}, x_{2}, x_{3}, \\ldots ,x_{n}]`
or :math:`[[x_{11}, x_{12}, x_{13}, \\ldots, x_{1m}],
\\ldots, [x_{n1}, x_{n2}, x_{n3}, \\ldots, x_{nm}]]`
or an ordinal network returned by :func:`ordpy.ordinal_network`\\ [*]_.
dx : int
Embedding dimension (horizontal axis) (default: 3).
dy : int
Embedding dimension (vertical axis); it must be 1 for time series
(default: 1).
taux : int
Embedding delay (horizontal axis) (default: 1).
tauy : int
Embedding delay (vertical axis) (default: 1).
overlapping : boolean
If `True`, **data** is partitioned into overlapping sliding
windows (default: `True`). If `False`, adjacent partitions are
non-overlapping.
connections : str
The ordinal network is constructed using `'all'` permutation
successions in a symbolic sequence or only `'horizontal'` or
`'vertical'` successions. Parameter only valid for image data
(default: `'all'`).
tie_precision : int
If not `None`, **data** is rounded with `tie_precision`
number of decimals (default: `None`).
Returns
-------
: float
Value of global node entropy.
Notes
-----
.. [*] In case **data** is an ordinal network returned by
:func:`ordpy.ordinal_network`, the parameters of
:func:`ordpy.global_node_entropy` are infered from the network.
Examples
--------
>>> global_node_entropy([1,2,3,4,5,6,7,8,9], dx=2)
0.0
>>>
>>> global_node_entropy(ordinal_network([1,2,3,4,5,6,7,8,9], dx=2))
0.0
>>>
>>> global_node_entropy(np.random.uniform(size=100000), dx=3)
1.4988332319747597
>>>
>>> global_node_entropy(random_ordinal_network(dx=3))
1.5
>>>
>>> global_node_entropy([[1,2,1,4],[8,3,4,5],[6,7,5,6]], dx=2, dy=2, connections='horizontal')
0.25
>>>
>>> global_node_entropy([[1,2,1,4],[8,3,4,5],[6,7,5,6]], dx=2, dy=2, connections='vertical')
0.0
"""
    if len(data) == 3 and isinstance(data[0][0], np.str_):
nodes, links, weights = data
else:
#assumes 'normalized==True' and 'directed==True'.
nodes, links, weights = ordinal_network(data, dx, dy, taux, tauy, True,
overlapping, True, connections, tie_precision=tie_precision)
links_source = links.transpose()[0]
links_target = links.transpose()[1]
h_gn = 0
for node in nodes:
args = np.argwhere(links_source==node).flatten()
renorm_weights = weights[args]/np.sum(weights[args])
args_in = np.argwhere(links_target==node).flatten()
p_in = np.sum(weights[args_in])
h_i = -np.sum(renorm_weights*np.log2(renorm_weights))
h_gn += p_in*h_i
return h_gn | a14e7419bcd58d41400b888443df726836e6d04a | 14,561 |
def get_cert(certificate):
"""
Return the data of the certificate
:returns: the certificate file contents
"""
cert_file = "{}/certs/{}".format(snapdata_path, certificate)
with open(cert_file) as fp:
cert = fp.read()
return cert | da2ac96bf16a74de9ac75a46d14f3b95b5f64264 | 14,562 |
def model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27):
"""
Trains the model and generates dinosaur names.
Arguments:
data -- text corpus
ix_to_char -- dictionary that maps the index to a character
char_to_ix -- dictionary that maps a character to an index
num_iterations -- number of iterations to train the model for
n_a -- number of units of the RNN cell
dino_names -- number of dinosaur names you want to sample at each iteration.
vocab_size -- number of unique characters found in the text (size of the vocabulary)
Returns:
parameters -- learned parameters
"""
# Retrieve n_x and n_y from vocab_size
n_x, n_y = vocab_size, vocab_size
# Initialize parameters
parameters = initialize_parameters(n_a, n_x, n_y)
# Initialize loss (this is required because we want to smooth our loss)
loss = get_initial_loss(vocab_size, dino_names)
# Build list of all dinosaur names (training examples).
with open("dinos.txt") as f:
examples = f.readlines()
examples = [x.lower().strip() for x in examples]
# Shuffle list of all dinosaur names
np.random.seed(0)
np.random.shuffle(examples)
# Initialize the hidden state of your LSTM
a_prev = np.zeros((n_a, 1))
# Optimization loop
for j in range(num_iterations):
### START CODE HERE ###
# Set the index `idx` (see instructions above)
idx = j%len(examples)
# Set the input X (see instructions above)
single_example = examples[idx]
single_example_chars = [c for c in single_example]
single_example_ix = [char_to_ix[c] for c in single_example_chars]
X = [None]+single_example_ix
# Set the labels Y (see instructions above)
ix_newline = char_to_ix["\n"]
Y = X[1:]+[ix_newline]
# Perform one optimization step: Forward-prop -> Backward-prop -> Clip -> Update parameters
# Choose a learning rate of 0.01
curr_loss, gradients, a_prev = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)
### END CODE HERE ###
    # Smooth the loss with a moving average so the reported value is stable.
loss = smooth(loss, curr_loss)
    # Every 2000 iterations, generate "n" characters with sample() to check that the model is learning properly
if j % 2000 == 0:
print('Iteration: %d, Loss: %f' % (j, loss) + '\n')
# The number of dinosaur names to print
seed = 0
for name in range(dino_names):
# Sample indices and print them
sampled_indices = sample(parameters, char_to_ix, seed)
print_sample(sampled_indices, ix_to_char)
seed += 1 # To get the same result (for grading purposes), increment the seed by one.
print('\n')
return parameters | b1fb202b2c697cae1473c91597b39914e6197dce | 14,563 |
import random
def random_choice(gene):
"""
    Randomly select an object, such as a string, from a list. The gene must define a `choices` list.
Args:
gene (Gene): A gene with a set `choices` list.
Returns:
object: Selected choice.
"""
    if 'choices' not in gene.__dict__:
        raise KeyError("'choices' not defined in this gene, please include a list of values!")
return random.choice(gene.choices) | 8a01a2039a04262aa4fc076bdd87dbf760f45253 | 14,564 |
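A small sketch with a stand-in gene object (the Gene class itself is external; any object with a choices attribute works):

class FakeGene:
    """Hypothetical stand-in for Gene, only for illustration."""
    def __init__(self, choices):
        self.choices = choices

gene = FakeGene(['relu', 'tanh', 'sigmoid'])
print(random_choice(gene))  # e.g. 'tanh'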
def get_actor(payload: PayloadJSON, actor_id: int) -> ResourceJSON:
"""Return an actor by actor_id"""
actor = ActorModel.find_by_id(actor_id)
if actor is None:
abort(404)
    return jsonify({"success": True, "actor": actor.json()})
import string
def check_if_punctuations(word: str) -> bool:
"""Returns ``True`` if ``word`` is just a sequence of punctuations."""
for c in word:
if c not in string.punctuation:
return False
return True | 64ba5f9dc69c59490a2ea69e7c2d938151d71b37 | 14,566 |
import re

from nltk.corpus import stopwords
from nltk.stem import PorterStemmer

def normalize_text(string, remove_stopwords=False, stem_words=False):
"""
Remove punctuation, parentheses, question marks, etc.
"""
strip_special_chars = re.compile("[^A-Za-z0-9 ]+")
string = string.lower()
string = string.replace("<br />", " ")
string = string.replace(r"(\().*(\))|([^a-zA-Z'])",' ')
string = string.replace('&', 'and')
string = string.replace('@', 'at')
string = string.replace('0', 'zero')
string = string.replace('1', 'one')
string = string.replace('2', 'two')
string = string.replace('3', 'three')
string = string.replace('4', 'four')
string = string.replace('5', 'five')
string = string.replace('6', 'six')
string = string.replace('7', 'seven')
string = string.replace('8', 'eight')
string = string.replace('9', 'nine')
string = string.split()
if remove_stopwords:
stop_words = stopwords.words('english')
string = [w for w in string if w not in stop_words]
if stem_words:
ps = PorterStemmer()
string = [ps.stem(w) for w in string]
string = ' '.join(string)
return re.sub(strip_special_chars, "", string) | 0aff8864f526ffe194c661acc69ccb2cf91a6f24 | 14,567 |
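A quick example (stopword removal and stemming additionally require the NLTK data to be downloaded):

print(normalize_text("The (2) quick brown foxes & dogs!"))
# 'the quick brown foxes and dogs'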
import dataset

def get_new_codes():
""" Return New Codes and Refresh DB"""
db = dataset.connect(database_url)
new_codes = get_code()
table = db['promo']
""" Get New Codes"""
new = {}
for key, value in new_codes.items():
if table.find_one(promo=key) is None:
new[key] = [new_codes[key][0], new_codes[key][1]]
else:
pass
""" Add to DB """
for key in new:
table.insert(dict(promo=key, desc=new_codes[key][1], exp=new_codes[key][0]))
return new | e3ece2e8b43fa43ac4c8d384dbf55957d8bc62c6 | 14,568 |
def process_bulk_add_ip(request, formdict):
"""
Performs the bulk add of ips by parsing the request data. Batches
some data into a cache object for performance by reducing large
amounts of single database queries.
:param request: Django request.
:type request: :class:`django.http.HttpRequest`
:param formdict: The form representing the bulk uploaded data.
:type formdict: dict
:returns: :class:`django.http.HttpResponse`
"""
ip_names = []
cached_results = {}
cleanedRowsData = convert_handsontable_to_rows(request)
for rowData in cleanedRowsData:
        if rowData is not None and rowData.get(form_consts.IP.IP_ADDRESS) is not None:
ip_names.append(rowData.get(form_consts.IP.IP_ADDRESS).lower())
ip_results = IP.objects(ip__in=ip_names)
for ip_result in ip_results:
cached_results[ip_result.ip] = ip_result
cache = {form_consts.IP.CACHED_RESULTS: cached_results, 'cleaned_rows_data': cleanedRowsData}
response = parse_bulk_upload(request, parse_row_to_bound_ip_form, add_new_ip_via_bulk, formdict, cache)
return response | d38bc7766f232b972637da9c92567ebde91ddf52 | 14,569 |
def gen_public_e(lambda_: int) -> int:
""" Generates decrecingly smaller sequence of bytes and
converts them to integer until one satisfies > lambda
Continues with half the ammount of necesary bytes decreasing
by one integer until gcd(candidate, lambda) == 1.
"""
bytes_ = 1028 + 1
candidate = crand(bytes_)
while candidate > lambda_: # Finds random amount of bytes
candidate = crand(bytes_)
bytes_ -= 1
candidate = crand(bytes_ // 2) # Generates new candidate in the middle
e = 2**16 + 1
while candidate > e: # Finds candidate that satisfies gcd
if gcd(candidate, lambda_) == 1:
break
candidate -= 1
return candidate | 903df12b7af83be24d3ef377e2aa95a00a7df089 | 14,570 |
from typing import Callable, TypeVar, Union

T = TypeVar("T")

def lazy(maybe_callable: Union[T, Callable[[], T]]) -> T:
"""
Call and return a value if callable else return it.
>>> lazy(42)
42
>>> lazy(lambda: 42)
42
"""
if callable(maybe_callable):
return maybe_callable()
return maybe_callable | 83522ae39b8ec19e86d5e30b2b0a9131a7c56a35 | 14,571 |
from random import shuffle
def shuffle_list(*ls):
""" shuffle multiple list at the same time
:param ls:
:return:
"""
l = list(zip(*ls))
shuffle(l)
return zip(*l) | ec46e4a8da2c04cf62da2866d2d685fc796887e5 | 14,573 |
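A quick example; both lists receive the same permutation:

xs = [1, 2, 3, 4]
ys = ['a', 'b', 'c', 'd']
xs2, ys2 = shuffle_list(xs, ys)
print(xs2, ys2)  # pairing preserved, e.g. (3, 1, 4, 2) ('c', 'a', 'd', 'b')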
def cancer_variants(institute_id, case_name):
"""Show cancer variants overview."""
data = controllers.cancer_variants(store, request.args, institute_id, case_name)
return data | 185282e9308f7a9f8a0d7faf4c5d3608dee556cc | 14,574 |
def nodes(G):
"""Returns an iterator over the graph nodes."""
return G.nodes() | 3a1a543f1af4d43c79fd0083eb77fedd696547ec | 14,575 |
from typing import Union
def cyclePosition(image: np.ndarray, startPosition: position) -> Union[position, bool]:
"""
:param image: numpy image array
:param startPosition: from where to go to Tuple (x,y)
:return: newPosition (x,y), or false if new coords would fall out of bounds
"""
if not imageWrapper.boundsChecker(image, startPosition):
return False
if startPosition.coords[0] == image.shape[1] - 1:
if startPosition.coords[1] < image.shape[0] - 1:
return position((0, startPosition.coords[1] + 1))
return False
return position((startPosition.coords[0] + 1, startPosition.coords[1])) | a4d3eaf1ddecc884f7391614ae04ff4b10029af3 | 14,576 |
def context_to_ingestion_params(context):
"""extract the ingestion task params from job/serving context"""
featureset_uri = context.get_param("featureset")
featureset = context.get_store_resource(featureset_uri)
infer_options = context.get_param("infer_options", InferOptions.Null)
source = context.get_param("source")
if source:
source = get_source_from_dict(source)
elif featureset.spec.source.to_dict():
source = get_source_from_dict(featureset.spec.source.to_dict())
overwrite = context.get_param("overwrite", None)
targets = context.get_param("targets", None)
if not targets:
targets = featureset.spec.targets
targets = [get_target_driver(target, featureset) for target in targets]
return featureset, source, targets, infer_options, overwrite | 41925ce484bbc273caf9a8f0f33eba0e7163a7c8 | 14,578 |
import numpy as np

def bining_for_calibration(pSigma_cal_ordered_, minL_sigma,
maxL_sigma, Er_vect_cal_orderedSigma_,
bins, coverage_percentile):
""" Bin the values of the standard deviations observed during
inference and estimate a specified coverage percentile
in the absolute error (observed during inference as well).
Bins that have less than 50 samples are merged until they
surpass this threshold.
Parameters
----------
pSigma_cal_ordered_ : numpy array
Array of standard deviations ordered in ascending way.
minL_sigma : float
Minimum value of standard deviations included in
pSigma_cal_ordered_ array.
maxL_sigma : numpy array
Maximum value of standard deviations included in
pSigma_cal_ordered_ array.
    Er_vect_cal_orderedSigma_ : numpy array
        Array of absolute values of errors corresponding to
        the array of ordered standard deviations.
bins : int
Number of bins to split the range of standard deviations
included in pSigma_cal_ordered_ array.
coverage_percentile : float
Value to use for estimating coverage when evaluating the percentiles
of the observed absolute value of errors.
    Returns
    -------
mean_sigma : numpy array
Array with the mean standard deviations computed per bin.
min_sigma : numpy array
Array with the minimum standard deviations computed per bin.
max_sigma : numpy array
Array with the maximum standard deviations computed per bin.
error_thresholds : numpy array
Thresholds of the errors computed to attain a certain
error coverage per bin.
err_err : numpy array
Error bars in errors (one standard deviation for a binomial
distribution estimated by bin vs. the other bins) for the
calibration error.
"""
# thresholds = np.logspace(np.log10(minL_sigma), np.log10(maxL_sigma), num=bins)
thresholds = np.linspace(minL_sigma, maxL_sigma, num=bins)
classes = np.digitize(pSigma_cal_ordered_, thresholds)
Nbin = np.zeros(bins + 1)
for i in range(bins + 1):
indices = (classes == i)
Nbin[i] = indices.sum()
# Repair bins
new_thresholds_l = []
new_nbins_l = []
sumN = 0
for i in range(Nbin.shape[0]):
sumN += Nbin[i]
if sumN > 50:
if i > (thresholds.shape[0] - 1):
new_thresholds_l.append(thresholds[-1])
else:
new_thresholds_l.append(thresholds[i])
new_nbins_l.append(sumN)
sumN = 0
new_thresholds = np.array(new_thresholds_l)
new_nbins = np.array(new_nbins_l)
new_thresholds[-1] = thresholds[-1]
new_nbins[-1] += sumN
#
classes = np.digitize(pSigma_cal_ordered_, new_thresholds[:-1])
error_thresholds = -1. * np.ones(new_nbins.shape[0])
mean_sigma = -1. * np.ones(new_nbins.shape[0])
min_sigma = -1. * np.ones(new_nbins.shape[0])
max_sigma = -1. * np.ones(new_nbins.shape[0])
err_err = -1. * np.ones(new_nbins.shape[0])
Ncal = pSigma_cal_ordered_.shape[0]
for i in range(error_thresholds.shape[0]):
indices = (classes == i)
n_aux = indices.sum()
assert n_aux == new_nbins[i]
print('Points in bin %d: %d' % (i, n_aux))
mean_sigma[i] = np.mean(pSigma_cal_ordered_[indices])
min_sigma[i] = np.min(pSigma_cal_ordered_[indices])
max_sigma[i] = np.max(pSigma_cal_ordered_[indices])
error_thresholds[i] = np.percentile(Er_vect_cal_orderedSigma_[indices], coverage_percentile)
err_err[i] = np.sqrt(new_nbins[i] * (Ncal - new_nbins[i])) / Ncal * error_thresholds[i]
return mean_sigma, min_sigma, max_sigma, error_thresholds, err_err | cb241f6292726a86a7505e950f32fd1c1fefd19f | 14,579 |
def add_user(request):
"""注册用户"""
info = {}
tpl_name = 'user/add_user.html'
if request.method == 'POST':
        # Save the submitted user data
        nickname = request.POST.get('nickname')
        if User.objects.filter(nickname__exact=nickname).exists():
            # nickname already exists
            info = {'error': 'Nickname already exists'}
            # re-render the registration page
            return render(request, tpl_name, info)
        password = request.POST.get('password')
        password2 = request.POST.get('password2')
        if password != password2:
            # the two passwords do not match
            info = {'error': 'The two passwords do not match'}
            # re-render the registration page
            return render(request, tpl_name, info)
age = request.POST.get('age')
sex = request.POST.get('sex')
f_in = request.FILES.get('icon')
user = User(nickname=nickname,password=password,age=age,sex=sex)
if f_in:
user.icon.save(f_in.name,f_in,save=False)
user.set_password(password)
user.save()
        # Record the user info in the session
        request.session['uid'] = user.id
        request.session['nickname'] = user.nickname
        # Redirect to the user detail page
        url = '/user/read_user/?uid={}'.format(user.id)
return redirect(url)
else:
        # Show the registration page
return render(request,tpl_name,info) | bbfd3bc8ed47f19f527352f39d5e5b2bdc80d450 | 14,580 |
def process_input(input_string, max_depth):
"""
Clean up the input, convert it to an array and compute the longest
array, per feature type.
"""
# remove the quotes and extra spaces from the input string
input_string = input_string.replace('"', '').replace(', ', ',').strip()
# convert the string to an array and also track the longest array, so
# we know how many levels for the feature type.
tmp = []
if input_string:
tmp = input_string.split(',')
if max_depth < len(tmp):
max_depth = len(tmp)
# return the array and the depth
return tmp, max_depth | ca0fddd0b3bf145c7fc0654212ae43f02799b466 | 14,581 |
import random
def generate_new_shape() -> tuple[int, list[int], list[int]]:
"""Generate new shape
    #1:
    hot_cell_y = [0,1,2,3]
    hot_cell_x = [5,5,5,5]
    X
    X
    X
    X
    #2:
    hot_cell_y = [0,0,0,0]
    hot_cell_x = [3,4,5,6]
    XXXX
    #3:
    hot_cell_y = [0,1,0,1]
    hot_cell_x = [4,4,5,5]
    XX
    XX
    #4:
    hot_cell_y = [0,0,1,1]
    hot_cell_x = [4,5,5,6]
    XX
     XX
    #5:
    hot_cell_y = [0,1,1,2]
    hot_cell_x = [4,4,5,5]
    X
    XX
     X
    #6:
    hot_cell_y = [0,1,2,2]
    hot_cell_x = [4,4,4,5]
    X
    X
    XX
    #7:
    hot_cell_y = [1,0,1,1]
    hot_cell_x = [4,4,5,6]
    X
    XXX
    """
shape_id = random.randint(1, 7)
logger.info("generating shape id => " + str(shape_id))
shape_color = shape_id
    if shape_id == 2:
        shape_y_pos_list = [0, 0, 0, 0]
        shape_x_pos_list = [3, 4, 5, 6]
    elif shape_id == 3:
        shape_y_pos_list = [0, 1, 0, 1]
        shape_x_pos_list = [4, 4, 5, 5]
    elif shape_id == 4:
        shape_y_pos_list = [0, 0, 1, 1]
        shape_x_pos_list = [4, 5, 5, 6]
    elif shape_id == 5:
        shape_y_pos_list = [0, 1, 1, 2]
        shape_x_pos_list = [4, 4, 5, 5]
    elif shape_id == 6:
        shape_y_pos_list = [0, 1, 2, 2]
        shape_x_pos_list = [4, 4, 4, 5]
    elif shape_id == 7:
        shape_y_pos_list = [0, 1, 1, 1]
        shape_x_pos_list = [4, 4, 5, 6]
    else:
        shape_y_pos_list = [0, 1, 2, 3]
        shape_x_pos_list = [5, 5, 5, 5]
return (shape_color, shape_x_pos_list, shape_y_pos_list) | d7c2c710f72ed5d21b7e63779815ee7bfb8421f4 | 14,582 |
def get_process_list(process):
"""Analyse the process description and return the Actinia process chain and the name of the processing result
:param process: The process description
:return: (output_names, actinia_process_list)
"""
input_names, process_list = analyse_process_graph(process)
output_names = []
# First analyse the data entrie
if "data_id" not in process:
raise Exception("Process %s requires parameter <data_id>" % PROCESS_NAME)
output_names.append(process["data_id"])
pc = create_process_chain_entry(input_name=process["data_id"])
process_list.append(pc)
# Then add the input to the output
for input_name in input_names:
# Create the output name based on the input name and method
output_name = input_name
output_names.append(output_name)
return output_names, process_list | f1e6689c50e00117379107fc840a09f4638c3912 | 14,583 |
def createAES(key, IV, implList=None):
"""Create a new AES object.
:type key: str
:param key: A 16, 24, or 32 byte string.
:type IV: str
:param IV: A 16 byte string
:rtype: tlslite.utils.AES
:returns: An AES object.
"""
if implList is None:
implList = ["openssl", "pycrypto", "python"]
for impl in implList:
if impl == "openssl" and cryptomath.m2cryptoLoaded:
return openssl_aes.new(key, 2, IV)
elif impl == "pycrypto" and cryptomath.pycryptoLoaded:
return pycrypto_aes.new(key, 2, IV)
elif impl == "python":
return python_aes.new(key, 2, IV)
raise NotImplementedError() | c41a5d028028383630b0977522a9617334c94d03 | 14,584 |
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError

def update_status_issue(issue, status_id, notes):
"""Request to change the status of a problem in a redmine project.
'issue': A hash of the issue is bound to a redmine project.
'status_id': Id status used by redmine the project.
'notes': Comments about the update.
Return value:
0 - on success
non zero - HTTP protocol errors are valid responses.
"""
values = '''{ "issue": { "status_id": "%s", "notes": "%s" } }''' % (status_id, notes)
req = Request(
'%s/issues/%s.json' % (_service_host_project(), issue), data=values.encode(), method='PUT')
req.add_header('Content-Type', 'application/json')
req.add_header('X-Redmine-API-Key', _service_access_key())
try:
with urlopen(req) as context:
pass
return 0 if context.code == 200 else context.code
    except HTTPError as err:
        print('The server couldn\'t fulfill the request.')
        print('Error code: ', err.code)
        return err.code
    except URLError as err:
print('We failed to reach a server.')
print('Reason: ', err.reason) | 6c0118f514083d228ac1d27271d297b3e593be52 | 14,587 |
import xml.etree.ElementTree as ET
from typing import Optional
def _get_avgiver_epost(root: ET.Element, ns: dict) -> Optional[str]:
"""
Sought: the email of the submitter
Can be found in a child element (<mets:note>) of an <mets:agent> with
ROLE="OTHER", OTHERROLE="SUBMITTER", TYPE="INDIVIDUAL"
"""
try:
agent = [
agent for agent in _get_agent_elements(root, ns)
if (
agent.get("ROLE") == "OTHER" and
agent.get("OTHERROLE") == "SUBMITTER" and
agent.get("TYPE") == "INDIVIDUAL"
)
].pop()
notes = agent.findall("mets:note", namespaces=ns)
email = [
note.text for note in notes
if "@" in note.text
].pop()
return email
except IndexError:
return None | 9be1f53fcf9799f3a559cd12b3bfa056aaac11a7 | 14,588 |
import numpy as np
import tensorflow as tf  # uses the TF 1.x API (tf.random_uniform)

def xavier_init(fan_in, fan_out, constant=1):
    """
    Xavier initialization of network weights.
    """
low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
high = constant * np.sqrt(6.0 / (fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval=low, maxval=high,
dtype=tf.float32, seed=np.random.randint(0, 1e9)) | 35b6f7b75eb44f1828d82c6743ec4751db4ff234 | 14,589 |
def strToMat3(dbstr):
    """
    convert a string like e00, e01, e02, ... into Mat3
    :param dbstr: comma-separated string of 16 floats
    :return: panda Mat3
    """
    exx = dbstr.split(',')
    exxdecimal = list(map(float, exx))  # list() so len() works in Python 3
    assert len(exxdecimal) == 16
    return Mat3(exxdecimal[0], exxdecimal[1], exxdecimal[2],
exxdecimal[4], exxdecimal[5], exxdecimal[6],
exxdecimal[8], exxdecimal[9], exxdecimal[10]) | 8db33dc5e2fab613cd6cba00021486fe722c8d32 | 14,590 |
def map2(func, *matrix):
"""
Maps a function onto the elements of a matrix
Also accepts multiple matrices. Thus matrix addition is
map2(add, matrix1, matrix2)
"""
    matrix2 = []
    for i in range(len(matrix[0])):
        row2 = []
        matrix2.append(row2)
        for j in range(len(matrix[0][i])):
            args = [x[i][j] for x in matrix]
            row2.append(func(*args))
return matrix2 | 9af6f311c80e70789ba6d623776fc2f80edbd905 | 14,591 |
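Matrix addition, as described in the docstring:

from operator import add

m1 = [[1, 2], [3, 4]]
m2 = [[10, 20], [30, 40]]
print(map2(add, m1, m2))  # [[11, 22], [33, 44]]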
def register_libtype(cls):
"""Registry of library types we may come across when parsing XML.
    This allows us to define a few helper functions to dynamically convert
    the XML into objects. See buildItem() below for an example.
"""
LIBRARY_TYPES[cls.TYPE] = cls
return cls | bb94e9f73ec04be834fa6be7de0cebf7c10a57ec | 14,592 |
def construct_tablepath(fmdict, prefix=''):
"""
Construct a suitable pathname for a CASA table made from fmdict,
starting with prefix. prefix can contain a /.
If prefix is not given, it will be set to
"ephem_JPL-Horizons_%s" % fmdict['NAME']
"""
if not prefix:
prefix = "ephem_JPL-Horizons_%s" % fmdict['NAME']
return prefix + "_%.0f-%.0f%s%s.tab" % (fmdict['earliest']['m0']['value'],
fmdict['latest']['m0']['value'],
fmdict['latest']['m0']['unit'],
fmdict['latest']['refer']) | 95041aab91ac9994ef2068d5e05f6cd63969d94e | 14,593 |
import numpy as _np

def _grad_mulAux(kern, x, y, yerr, original_kernel):
    """
    _grad_mulAux() is necessary when dealing with multiple terms of
    sums and multiplications, e.g. ES*ESS + ES*ESS*WN + RQ*ES*WN,
    so that nothing breaks apart.
    Parameters
        kern = kernel in use
        x = range of values of the independent variable (usually time)
        y = range of values of the dependent variable (the measurements)
        yerr = error in the measurements
        original_kernel = original kernel (original sum) being used
    Returns
        See _grad_mul(kernel, x, y, yerr) for more info
    """
cov_matrix = build_matrix(original_kernel,x,yerr)
listof__kernels = [kern.__dict__["k2"]] #to put each kernel separately
kernel_k1 = kern.__dict__["k1"]
while len(kernel_k1.__dict__) == 2:
listof__kernels.insert(0,kernel_k1.__dict__["k2"])
kernel_k1=kernel_k1.__dict__["k1"]
listof__kernels.insert(0,kernel_k1) #each kernel is now separated
kernelaux1 = []; kernelaux2 = []
for i, e in enumerate(listof__kernels):
kernelaux1.append(listof__kernels[i])
kernelaux2.append(_kernel_deriv(listof__kernels[i]))
grad_result = []
kernelaux11 = kernelaux1; kernelaux22 = kernelaux2
ii = 0
while ii<len(listof__kernels):
kernelaux11 = kernelaux1[:ii] + kernelaux1[ii+1 :]
_kernels = _np.prod(_np.array(kernelaux11))
for ij, e in enumerate(kernelaux22[ii]):
result = _grad_lp(kernelaux2[ii][ij]*_kernels,x,y,yerr,cov_matrix)
grad_result.insert(0,result)
kernelaux11 = kernelaux1;kernelaux22=kernelaux2
ii = ii+1
grad_result = grad_result[::-1]
return grad_result | ce140d8a73a8304d0601077b5ed01f93cb17deab | 14,594 |
import numpy as np
from scipy import stats

def get_unbiased_p_hat(number_candidates, c1, c2, p):
"""Get the p_hat to unbias miracle.
Args:
number_candidates: The number of candidates to be sampled.
c1: The factor that the conditional density of z given x is proportional to
if the inner product between x and z is more than gamma.
c2: The factor that the conditional density of z given x is proportional to
if the inner product between x and z is less than gamma.
p: The probability with which privunit samples an unit vector from the
shaded spherical cap associated with input (see original privunit paper).
Returns:
p_hat: The probability with which unbiased miracle will sample an unit
vector from the shaded spherical cap associated with input.
"""
# Compute the fraction of candidates that lie inside the cap.
beta = np.array(range(number_candidates + 1)) / number_candidates
pi_in = 1 / number_candidates * (c1 / (beta * c1 + (1 - beta) * c2))
p_hat = np.sum(
stats.binom.pmf(range(number_candidates + 1), number_candidates, p / c1) *
range(number_candidates + 1) * pi_in)
return p_hat | 5d45696557835f2bc655b601e015bb08356fe2dd | 14,595 |
def prox_gradf(xy, step):
"""Gradient step"""
return xy-step*grad_f(xy) | 7700850b9bfb5c5be5f0a63a678df93991673d81 | 14,596 |
import numpy
import math
def CBND(x, y, rho):
"""
A function for computing bivariate normal probabilities.
::
Alan Genz
Department of Mathematics
Washington State University
Pullman, WA 99164-3113
Email : [email protected]
This function is based on the method described by
::
Drezner, Z and G.O. Wesolowsky, (1990),
On the computation of the bivariate normal integral,
Journal of Statist. Comput. Simul. 35, pp. 101-107,
with major modifications for double precision, and for ``|R|`` close to 1.
    This code was originally translated into VBA by Graeme West
"""
W = numpy.zeros((11,4))
XX = numpy.zeros((11,4))
W[1][1] = 0.17132449237917
XX[1][1] = -0.932469514203152
W[2][1] = 0.360761573048138
XX[2][1] = -0.661209386466265
W[3][1] = 0.46791393457269
XX[3][1] = -0.238619186083197
W[1][2] = 4.71753363865118E-02
XX[1][2] = -0.981560634246719
W[2][2] = 0.106939325995318
XX[2][2] = -0.904117256370475
W[3][2] = 0.160078328543346
XX[3][2] = -0.769902674194305
W[4][2] = 0.203167426723066
XX[4][2] = -0.587317954286617
W[5][2] = 0.233492536538355
XX[5][2] = -0.36783149899818
W[6][2] = 0.249147045813403
XX[6][2] = -0.125233408511469
W[1][3] = 1.76140071391521E-02
XX[1][3] = -0.993128599185095
W[2][3] = 4.06014298003869E-02
XX[2][3] = -0.963971927277914
W[3][3] = 6.26720483341091E-02
XX[3][3] = -0.912234428251326
W[4][3] = 8.32767415767048E-02
XX[4][3] = -0.839116971822219
W[5][3] = 0.10193011981724
XX[5][3] = -0.746331906460151
W[6][3] = 0.118194531961518
XX[6][3] = -0.636053680726515
W[7][3] = 0.131688638449177
XX[7][3] = -0.510867001950827
W[8][3] = 0.142096109318382
XX[8][3] = -0.37370608871542
W[9][3] = 0.149172986472604
XX[9][3] = -0.227785851141645
W[10][3] = 0.152753387130726
XX[10][3] = -7.65265211334973E-02
if numpy.abs(rho) < 0.3:
NG = 1
LG = 3
elif numpy.abs(rho) < 0.75:
NG = 2
LG = 6
else:
NG = 3
LG = 10
h = -x
k = -y
hk = h * k
BVN = 0
if numpy.abs(rho) < 0.925:
if numpy.abs(rho) > 0:
hs = (h * h + k * k) / 2.
asr = math.asin(rho)
for i in range(1,LG+1):
for ISs in [-1,1]:
sn = math.sin(asr * (ISs * XX[i][NG] + 1) / 2)
BVN = BVN + W[i][NG] * numpy.exp((sn * hk - hs) / (1 - sn * sn))
BVN = BVN * asr / (4. * numpy.pi)
BVN = BVN + CND(-h) * CND(-k)
else:
if rho < 0:
k = -k
hk = -hk
if numpy.abs(rho) < 1.:
Ass = (1. - rho) * (1. + rho)
A = numpy.sqrt(Ass)
bs = (h - k) ** 2
c = (4. - hk) / 8.
d = (12. - hk) / 16.
asr = -(bs / Ass + hk) / 2.
if asr > -100:
BVN = A * numpy.exp(asr) * (1 - c * (bs - Ass) * (1 - d * bs / 5.) / 3. + c * d * Ass * Ass / 5.)
if -hk < 100:
b = numpy.sqrt(bs)
BVN = BVN - numpy.exp(-hk / 2.) * numpy.sqrt(2. * numpy.pi) * CND(-b / A) * b * (1. - c * bs * (1. - d * bs / 5.) / 3.)
A = A / 2
for i in range(1,LG+1):
for ISs in [-1,1]:
xs = (A * (ISs * XX[i][NG] + 1)) ** 2
rs = numpy.sqrt(1 - xs)
asr = -(bs / xs + hk) / 2
if asr > -100:
                        BVN = BVN + A * W[i][NG] * numpy.exp(asr) * (numpy.exp(-hk * (1 - rs) / (2 * (1 + rs))) / rs - (1 + c * xs * (1 + d * xs)))
BVN = -BVN / (2. * numpy.pi)
if rho > 0.:
BVN = BVN + CND(-max(h, k))
else:
BVN = -BVN
if k > h:
BVN = BVN + CND(k) - CND(h)
    return BVN | 3b418e50acec31482df7137f484d396b1673d476 | 14,597
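# Sanity check for CBND above: for x = y = 0 the bivariate normal CDF has the
# closed form 1/4 + arcsin(rho) / (2*pi), so CBND(0, 0, 0.5) should be close
# to 0.25 + 1/12 ≈ 0.3333.
print(CBND(0.0, 0.0, 0.5))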
from collections import defaultdict
from copy import deepcopy

import numpy as np


def prune(root: Node, copy: bool = True) -> Node:
"""
Prune (or simplify) the given SPN to a minimal and equivalent SPN.
:param root: The root of the SPN.
:param copy: Whether to copy the SPN before pruning it.
:return: A minimal and equivalent SPN.
:raises ValueError: If the SPN structure is not a directed acyclic graph (DAG).
:raises ValueError: If an unknown node type is found.
"""
# Copy the SPN before proceeding, if specified
if copy:
root = deepcopy(root)
# Check the SPN
check_spn(root, labeled=True, smooth=True, decomposable=True)
nodes = topological_order(root)
if nodes is None:
raise ValueError("SPN structure is not a directed acyclic graph (DAG)")
# Build a dictionary that maps each id of a node to the corresponding node object
nodes_map = dict(map(lambda n: (n.id, n), nodes))
# Proceed by reversed topological order
for node in reversed(nodes):
# Skip leaves
if isinstance(node, Leaf):
continue
# Retrieve the children nodes from the mapping
children_nodes = list(map(lambda n: nodes_map[n.id], node.children))
if len(children_nodes) == 1:
nodes_map[node.id] = children_nodes[0]
elif isinstance(node, Product):
# Subsequent product nodes, concatenate the children of them
children = list()
for child in children_nodes:
if not isinstance(child, Product):
children.append(child)
continue
product_children = map(lambda n: nodes_map[n.id], child.children)
children.extend(product_children)
nodes_map[node.id].children = children
elif isinstance(node, Sum):
# Subsequent sum nodes, concatenate the children of them and adjust the weights accordingly
# Important! This implementation take care also of directed acyclic graphs (DAGs)
children_weights = defaultdict(float)
for i, child in enumerate(children_nodes):
if not isinstance(child, Sum):
children_weights[child] += node.weights[i]
continue
sum_children = map(lambda n: nodes_map[n.id], child.children)
for j, sum_child in enumerate(sum_children):
children_weights[sum_child] += node.weights[i] * child.weights[j]
children, weights = zip(*children_weights.items())
nodes_map[node.id].weights = np.array(weights, dtype=node.weights.dtype)
nodes_map[node.id].children = children
else:
raise ValueError("Unknown node type called {}".format(node.__class__.__name__))
return assign_ids(nodes_map[root.id]) | e242156bca1d8a3be8ca673a6629dadf967ccb5b | 14,598 |
import cPickle
import logging
import os
import re
import urllib2


def GetRevisionAndLogs(slave_location, build_num):
"""Get a revision number and log locations.
Args:
slave_location: A URL or a path to the build slave data.
build_num: A build number.
Returns:
A pair of the revision number and a list of strings that contain locations
of logs. (False, []) in case of error.
"""
if slave_location.startswith('http://'):
location = slave_location + '/builds/' + str(build_num)
else:
location = os.path.join(slave_location, str(build_num))
revision = False
logs = []
fp = None
try:
if location.startswith('http://'):
fp = urllib2.urlopen(location)
contents = fp.read()
      revisions = re.findall(r'<td class="left">got_revision</td>\s+'
                             r'<td>(\d+)</td>\s+<td>Source</td>', contents)
if revisions:
revision = revisions[0]
logs = [location + link + '/text' for link
in re.findall(r'(/steps/endure[^/]+/logs/stdio)', contents)]
else:
fp = open(location, 'rb')
build = cPickle.load(fp)
properties = build.getProperties()
if properties.has_key('got_revision'):
revision = build.getProperty('got_revision')
candidates = os.listdir(slave_location)
logs = [os.path.join(slave_location, filename)
for filename in candidates
if re.match(r'%d-log-endure[^/]+-stdio' % build_num, filename)]
except urllib2.URLError, e:
logging.exception('Error reading build URL "%s": %s', location, str(e))
return False, []
except (IOError, OSError), e:
logging.exception('Error reading build file "%s": %s', location, str(e))
return False, []
finally:
if fp:
fp.close()
return revision, logs | fb8b25a0f33194af288d411b2218edf904ab9f14 | 14,599 |
import json
def get_json(url, **kwargs):
"""Downloads json data and converts it to a dict"""
raw = get(url, **kwargs)
    if raw is None:
return None
return json.loads(raw.decode('utf8')) | 16504c03beaa1a5913f2256ad6a1871049694e14 | 14,600 |
def get_text(im):
"""
    Return the text portion of the image.
"""
return im[3:24, 116:288] | 86db2a16372aacb6cde29a2bf16c84f14f65d715 | 14,601 |
def homepage():
"""Display tweets"""
tweet_to_db()
    output = Tweet.query.order_by(desc('time_created')).all()
# to display as hyper links
for tweet in output:
tweet.handle = linkyfy(tweet.handle, is_name=True)
tweet.text = linkyfy(tweet.text)
return render_template("home.html", output=output) | d37138ea2ed6bdf8a650e644943e330400417f57 | 14,602 |
def watch_list_main_get():
"""
Render watch list page.
Author: Jérémie Dierickx
"""
watchlist = env.get_template('watchlists.html')
return header("Watch List") + watchlist.render(user_name=current_user.pseudo) + footer() | 760c8f2acf4a3ea1791860568ae747b9bd35593c | 14,603 |
def input_file(inp_str):
""" Parse the input string
"""
# Parse the sections of the input into keyword-val dictionaries
train_block = ioformat.ptt.symb_block(inp_str, '$', 'training_data')
fform_block = ioformat.ptt.symb_block(inp_str, '$', 'functional_form')
exec_block = ioformat.ptt.symb_block(inp_str, '$', 'fortran_execution')
train_dct = ioformat.ptt.keyword_dct_from_block(
train_block[1], formatvals=False)
fform_dct = ioformat.ptt.keyword_dct_from_block(
fform_block[1], formatvals=False)
exec_dct = ioformat.ptt.keyword_dct_from_block(
exec_block[1], formatvals=False)
# Set defaults (maybe use fancy version later if more defaults can be set)
if 'Units' not in train_dct:
train_dct['Units'] = DEFAULT_DCT['Units']
# Check that the dictionaries are built correctly
_check_dcts(train_dct, fform_dct, exec_dct)
return train_dct, fform_dct, exec_dct | 9ae7c55c59e6b89b43271836738fd4fffbf38455 | 14,604 |
def recursiveUpdate(target, source):
"""
Recursively update the target dictionary with the source dictionary, leaving unfound keys in place.
This is different than dict.update, which removes target keys not in the source
:param dict target: The dictionary to be updated
:param dict source: The dictionary to be integrated
:return: target dict is returned as a convenience. This function updates the target dict in place.
:rtype: dict
"""
for k, v in source.items():
if isinstance(v, dict):
target[k] = recursiveUpdate(target.get(k, {}), v)
else:
target[k] = v
return target | e1c11d0801be9526e8e73145b1dfc7be204fc7d0 | 14,606 |
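# Example usage of recursiveUpdate above; note the nested "a" key is merged
# rather than replaced, and the unmatched "c" key is left in place:
target = {"a": {"x": 1}, "c": 3}
source = {"a": {"y": 2}, "b": 4}
print(recursiveUpdate(target, source))  # {'a': {'x': 1, 'y': 2}, 'c': 3, 'b': 4}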
import json
import time

import pandas as pd
import requests
def macro_bank_usa_interest_rate():
"""
美联储利率决议报告, 数据区间从19820927-至今
https://datacenter.jin10.com/reportType/dc_usa_interest_rate_decision
https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v=1578581921
:return: 美联储利率决议报告-今值(%)
:rtype: pandas.Series
"""
t = time.time()
res = requests.get(
f"https://cdn.jin10.com/dc/reports/dc_usa_interest_rate_decision_all.js?v={str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)}"
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list = [item["datas"]["美国利率决议"] for item in json_data["list"]]
value_df = pd.DataFrame(value_list)
value_df.columns = json_data["kinds"]
value_df.index = pd.to_datetime(date_list)
temp_df = value_df["今值(%)"]
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "ec",
"attr_id": "24",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_usa_michigan_consumer_sentiment",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
}
r = requests.get(url, params=params, headers=headers)
temp_se = pd.DataFrame(r.json()["data"]["values"]).iloc[:, :2]
temp_se.index = pd.to_datetime(temp_se.iloc[:, 0])
temp_se = temp_se.iloc[:, 1]
    temp_df = pd.concat([temp_df, temp_se])
temp_df.dropna(inplace=True)
temp_df.sort_index(inplace=True)
temp_df = temp_df.reset_index()
temp_df.drop_duplicates(subset="index", inplace=True)
temp_df.set_index("index", inplace=True)
temp_df = temp_df.squeeze()
temp_df.index.name = None
temp_df.name = "usa_interest_rate"
temp_df = temp_df.astype("float")
return temp_df | 52885b4cfbb607d3ecbb0f89f19cac7e1f097ccd | 14,607 |
def get_kwd_group(soup):
"""
Find the kwd-group sections for further analysis to find
subject_area, research_organism, and keywords
"""
    kwd_group = extract_nodes(soup, 'kwd-group')
return kwd_group | 626a85b5274880d1e4520f4afe5a270e5f20832a | 14,608 |
def read_transport_file(input_file_name):
"""
Reads File "input_file_name".dat, and returns lists containing the atom
indices of the device atoms, as well as the atom indices of
the contact atoms. Also, a dictionary "interaction_distances" is generated,
    which specifies the maximum interaction distance between each type of atom.
"""
transport_file_path = "./" + INPUT_FOLDER_NAME + "/" + \
str(input_file_name) + "_" + "transport.dat"
file = open(transport_file_path, 'r')
max_file_lines = 1000
iterations = 0
    # IMPORTANT: In the file, the first atom has index one, but in this
    # program, the first atom has index zero.
region_list = [] # List of regions, starting with device region
line = file.readline()
entries = line.split()
#A single list of device atom indices.
device_region = []
# A list of lists, one list of atom indices for each contact.
contact_regions = []
iterations = 0
while iterations < max_file_lines:
new_indices = list(range(int(entries[1]) - 1, int(entries[2])))
if "Device" in entries[0]:
# Don't append, because we want a single list of indices for the
# device region.
device_region = device_region + new_indices
if "Contact" in entries[0]:
contact_regions.append(new_indices)
line = file.readline()
entries = line.split()
iterations += 1
if not("Device" in entries[0] or "Contact" in entries[0]):
break
region_list.append(device_region)
region_list += contact_regions
interaction_distances = {}
#line = file.readline()
#stripped_line = line.replace(" ", "").replace("\n", "")
#entries = line.split()
# loop terminates at first empty line, or at end of file
# (since readline() returns empty string at end of file)
iterations = 0
while iterations < max_file_lines:
key = entries[0] + entries[1]
interaction_distances[key] = float(entries[2])
line = file.readline()
entries = line.split()
iterations += 1
stripped_line = line.replace(" ", "").replace("\n", "")
if stripped_line == '':
break
# print("In read_transport_file: " + str(region_list))
return (region_list, interaction_distances) | d62e3cc1dfbe2ac4865579dca86133bedb06182f | 14,609 |
def handle_srv6_path(operation, grpc_address, grpc_port, destination,
segments=None, device='', encapmode="encap", table=-1,
metric=-1, bsid_addr='', fwd_engine='linux', key=None,
update_db=True, db_conn=None, channel=None):
"""
Handle a SRv6 Path.
"""
# Dispatch depending on the operation
if operation == 'add':
return add_srv6_path(
grpc_address=grpc_address,
grpc_port=grpc_port,
destination=destination,
segments=segments,
device=device,
encapmode=encapmode,
table=table,
metric=metric,
bsid_addr=bsid_addr,
fwd_engine=fwd_engine,
key=key,
update_db=update_db,
db_conn=db_conn,
channel=channel
)
if operation == 'get':
return get_srv6_path(
grpc_address=grpc_address,
grpc_port=grpc_port,
destination=destination,
segments=segments,
device=device,
encapmode=encapmode,
table=table,
metric=metric,
bsid_addr=bsid_addr,
fwd_engine=fwd_engine,
key=key,
update_db=update_db,
db_conn=db_conn,
channel=channel
)
if operation == 'change':
return change_srv6_path(
grpc_address=grpc_address,
grpc_port=grpc_port,
destination=destination,
segments=segments,
device=device,
encapmode=encapmode,
table=table,
metric=metric,
bsid_addr=bsid_addr,
fwd_engine=fwd_engine,
key=key,
update_db=update_db,
db_conn=db_conn,
channel=channel
)
if operation == 'del':
return del_srv6_path(
grpc_address=grpc_address,
grpc_port=grpc_port,
destination=destination,
segments=segments,
device=device,
encapmode=encapmode,
table=table,
metric=metric,
bsid_addr=bsid_addr,
fwd_engine=fwd_engine,
key=key,
update_db=update_db,
db_conn=db_conn,
channel=channel
)
# Operation not supported, raise an exception
logger.error('Operation not supported')
raise utils.OperationNotSupportedException | 3181f9b4e99a6414c92614caee7af0ff133ad01d | 14,610 |
import matplotlib.patches as mpatches
import matplotlib.path as mpath


def mask_outside_polygon(poly_verts, ax, facecolor=None, edgecolor=None, alpha=0.25):
"""
    Plots a mask on the specified axis "ax" such that all areas outside of
    the polygon specified by "poly_verts" are masked.
    "poly_verts" must be a list of tuples of the vertices of the polygon in
    counter-clockwise order.
Returns the matplotlib.patches.PathPatch instance plotted on the figure.
"""
# Get current plot limits
xlim = ax.get_xlim()
ylim = ax.get_ylim()
    # Vertices of the plot boundaries in clockwise order
bound_verts = [
(xlim[0], ylim[0]),
(xlim[0], ylim[1]),
(xlim[1], ylim[1]),
(xlim[1], ylim[0]),
(xlim[0], ylim[0]),
]
# A series of codes (1 and 2) to tell matplotlib whether to draw a line or
# move the "pen" (So that there's no connecting line)
bound_codes = [mpath.Path.MOVETO] + (len(bound_verts) - 1) * [mpath.Path.LINETO]
poly_codes = [mpath.Path.MOVETO] + (len(poly_verts) - 1) * [mpath.Path.LINETO]
# Plot the masking patch
path = mpath.Path(bound_verts + poly_verts, bound_codes + poly_codes)
patch = mpatches.PathPatch(
path, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha
)
patch = ax.add_patch(patch)
# Reset the plot limits to their original extents
ax.set_xlim(xlim)
ax.set_ylim(ylim)
return patch | 1c46d12d7f3c92e3ff4522bb88713eae3c9138b1 | 14,611 |
import numpy as np


def setup_data(cluster):
"""
Get decision boundaries by means of np.meshgrid
    :return: Tuple (feature vectors, centroids, X component of meshgrid, Y component of meshgrid, predicted labels Z)
"""
feature_vectors, _, centroids, _, kmeans = cluster
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .2 # point in the mesh [x_min, x_max]x[y_min, y_max].
    # Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = feature_vectors[:, 0].min() - 1, feature_vectors[:, 0].max() + 1
y_min, y_max = feature_vectors[:, 1].min() - 1, feature_vectors[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
return feature_vectors, centroids, xx, yy, Z | edcfede8e8f7fc18fc9e7255127f0f14688df2f2 | 14,612 |
import datetime
import typing as t
def list_errors(
conx: Connection,
) -> t.List[t.Tuple[int, datetime.datetime, str, str, str, str]]:
"""Return list of all errors.
The list returned contains each error as an element in the list. Each
element is a tuple with the following layout:
(seq nr, date, err msg, err detail, level, state mask)
The 'err detail' and 'level' elements are not always present and thus
may be empty.
NOTE: this method is expensive and slow, as it retrieves a file from
the controller over FTP and parses it.
:returns: A list of all errors and their details
:rtype: list(tuple(int, datetime.datetime, str, str, str, str))
"""
errs = get_file_as_bytes(conx, remote_name='/md:/errall.ls')
res = []
for line in errs.decode('ascii').splitlines():
# check for really empty lines
if ('Robot Name' in line) or (line == ''):
continue
fields = list(map(str.strip, line.split('"')))
# check for empty rows (seen on just installed controllers)
if not fields[2]:
continue
# probably OK, try to continue parsing
level_state = fields[4].split()
        if len(level_state) > 1:
            err_level, err_state = level_state
        else:
            err_level, err_state = '', level_state[0]
stamp = datetime.datetime.strptime(fields[1], '%d-%b-%y %H:%M:%S')
res.append((int(fields[0]), stamp, fields[2], fields[3], err_level, err_state))
return res | 2aea677d8e69a76c5a6922d9c7e6ce3078ad7488 | 14,614 |
async def get_incident(incident_id):
"""
Get incident
---
get:
summary: Get incident
tags:
- incidents
parameters:
- name: id
in: path
required: true
description: Object ID
responses:
200:
description: The requested object
content:
application/json:
schema: Incident
"""
incident = g.Incident.find_by_id(incident_id)
if incident is None:
raise exceptions.NotFound(description="Incident {} was not found".format(incident_id))
return jsonify(incident), HTTPStatus.OK | f70703b43944dfa2385a2a35249dd692fe18a1ba | 14,615 |
from typing import List

import numpy as np


def partition_vector(vector, sets, fdtype: str = 'float64') -> List[NDArrayNfloat]:  # pragma: no cover
"""partitions a vector"""
vectors = []
for unused_aname, aset in sets:
if len(aset) == 0:
vectors.append(np.array([], dtype=fdtype))
continue
vectori = vector[aset]
vectors.append(vectori)
return vectors | e73494d146ec56a8287c0e0e3ec3dec7f7d93c37 | 14,616 |
import numpy as np


def calculate_G4(
n_numbers,
neighborsymbols,
neighborpositions,
G_elements,
theta,
zeta,
eta,
Rs,
cutoff,
cutofffxn,
Ri,
normalized=True,
image_molecule=None,
n_indices=None,
weighted=False,
):
"""Calculate G4 symmetry function.
These are 3 body or angular interactions.
Parameters
----------
    n_numbers : list of int
List of neighbors' chemical numbers.
neighborsymbols : list of str
List of symbols of neighboring atoms.
neighborpositions : list of list of floats
List of Cartesian atomic positions of neighboring atoms.
G_elements : list of str
A list of two members, each member is the chemical species of one of
the neighboring atoms forming the triangle with the center atom.
theta : float
Parameter of Gaussian symmetry functions.
zeta : float
Parameter of Gaussian symmetry functions.
eta : float
Parameter of Gaussian symmetry functions.
Rs : float
Parameter to shift the center of the peak.
cutoff : float
Cutoff radius.
cutofffxn : object
Cutoff function.
Ri : list
Position of the center atom. Should be fed as a list of three floats.
normalized : bool
Whether or not the symmetry function is normalized.
image_molecule : ase object, list
List of atoms in an image.
n_indices : list
List of indices of neighboring atoms from the image object.
weighted : bool
True if applying weighted feature of Gaussian function. See Ref. 2.
Returns
-------
feature : float
G4 feature value.
Notes
-----
The difference between the calculate_G3 and the calculate_G4 function is
that calculate_G4 accounts for bond angles of 180 degrees.
"""
feature = 0.0
counts = range(len(neighborpositions))
for j in counts:
for k in counts[(j + 1) :]:
els = sorted([neighborsymbols[j], neighborsymbols[k]])
if els != G_elements:
continue
Rij_vector = neighborpositions[j] - Ri
Rij = np.linalg.norm(Rij_vector)
Rik_vector = neighborpositions[k] - Ri
Rik = np.linalg.norm(Rik_vector)
cos_theta_ijk = np.dot(Rij_vector, Rik_vector) / Rij / Rik
theta_ijk = np.arccos(
np.clip(cos_theta_ijk, -1.0, 1.0)
) # Avoids rounding issues
cos_theta = np.cos(theta_ijk - theta)
term = (1.0 + cos_theta) ** zeta
term *= np.exp(-eta * ((Rij + Rik) / 2.0 - Rs) ** 2.0)
if weighted:
term *= weighted_h(image_molecule, n_indices)
term *= cutofffxn(Rij)
term *= cutofffxn(Rik)
feature += term
feature *= 2.0 ** (1.0 - zeta)
return feature | 5a864c615d2b835da4bb3d99435b9e2e2a40e136 | 14,617 |
def paths_from_root(graph, start):
"""
Generates paths from `start` to every other node in `graph` and puts it in
the returned dictionary `paths`.
    i.e. `paths_from_root(graph, start)[node]` is a list of the edge names used
to get to `node` form `start`.
"""
paths = {start: []}
q = [start]
seen = set()
while q:
node = q.pop()
seen.add(node)
for relation, child in graph[node]:
if isnode(child) and child not in seen:
q.append(child)
paths[child] = paths[node] + [relation]
return paths | 9b8399b67e14a6fbfe0d34c087317d06695bca65 | 14,619 |
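# Minimal sketch of paths_from_root in use; `isnode` is not defined in this
# snippet, so a trivial predicate is assumed here for illustration.
def isnode(x):
    return True  # hypothetical: treat every child as a traversable node

# graph maps each node to a list of (edge_name, child) pairs
graph = {
    "root": [("a", "n1"), ("b", "n2")],
    "n1": [("c", "n3")],
    "n2": [],
    "n3": [],
}
print(paths_from_root(graph, "root"))  # {'root': [], 'n1': ['a'], 'n2': ['b'], 'n3': ['a', 'c']}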
from typing import Sequence
from typing import Optional
from typing import Callable
from typing import Dict
def list_to_dict(l:Sequence, f:Optional[Callable]=None) -> Dict:
""" Convert the list to a dictionary in which keys and values are adjacent
in the list. Optionally, a function `f` can be passed to apply to each value
before adding it to the dictionary.
Parameters
----------
l: typing.Sequence
The list of items
f: typing.Callable
A function to apply to each value before inserting it into the list.
For example, `float` could be passed to convert each value to a float.
Returns
-------
d: typing.Dict
The dictionary, defined as described above
Examples
--------
.. code-block:: python
l = ["key1", "value1", "key2", "value2"]
list_to_dict(l, f) == {"key1": f("value1"), "key2": f("value2")}
"""
if len(l) % 2 != 0:
msg = ("[collection_utils.list_to_dict]: the list must contain an even number"
"of elements")
raise ValueError(msg)
if f is None:
f = lambda x: x
keys = l[::2]
values = l[1::2]
d = {k:f(v) for k, v in zip(keys, values)}
return d | a1f47582a2de8fa47bbf4c79c90165f8cf703ca1 | 14,620 |
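# Example usage of list_to_dict above, converting each value to a float:
l = ["key1", "1.5", "key2", "2.5"]
print(list_to_dict(l, float))  # {'key1': 1.5, 'key2': 2.5}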
def inner(a, b):
"""Computes an inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex conjugation).
Parameters
----------
a, b : array_like
If *a* and *b* are nonscalar, their shape must match.
Returns
-------
out : ndarray
out.shape = a.shape[:-1] + b.shape[:-1]
Restriction
-----------
If *a* or *b* is not 1-D array : *NotImplementedError* occurs.
Note
----
For vectors (1-D arrays) it computes the ordinary inner-product::
import nlcpy as vp
vp.inner(a, b) # equivalent to sum(a[:]*b[:])
if *a* or *b* is scalar, in which case::
vp.inner(a, b) # equivalent to a*b
See Also
--------
dot : Computes a dot product of two arrays.
Examples
--------
Ordinary inner product for vectors:
>>> import nlcpy as vp
>>> a = vp.array([1,2,3])
>>> b = vp.array([0,1,0])
>>> vp.inner(a, b)
array(2)
An example where b is a scalar:
>>> vp.inner(vp.eye(2), 7)
array([[7., 0.],
[0., 7.]])
"""
a = nlcpy.asanyarray(a)
b = nlcpy.asanyarray(b)
if a.ndim == 0 or b.ndim == 0:
return ufunc_op.multiply(a, b)
elif a.ndim == 1 and b.ndim == 1:
return cblas_wrapper.cblas_dot(a, b)
else:
raise NotImplementedError("Only 1-D array is supported.") | 248f1069251770073bc6bb4eedda0ef557aaeb9f | 14,621 |
from typing import Sequence, TypeVar

T = TypeVar("T")


def remove_list_redundancies(lst: Sequence[T]) -> list[T]:
"""
Used instead of list(set(l)) to maintain order
Keeps the last occurrence of each element
"""
return list(reversed(dict.fromkeys(reversed(lst)))) | f17408e7c3e3f5b2994e943b668c81b71933a2c9 | 14,622 |
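# Example usage of remove_list_redundancies above; note that, unlike
# list(set(...)), it preserves order and keeps the *last* occurrence:
print(remove_list_redundancies([1, 2, 1, 3, 2]))  # [1, 3, 2]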
# These Django utilities are assumed; the original snippet (adapted from
# Django's filesizeformat filter) did not include the imports.
from django.utils import formats
from django.utils.html import avoid_wrapping


def bpformat(bp):
"""
Format the value like a 'human-readable' file size (i.e. 13 Kbp, 4.1 Mbp,
102 bp, etc.).
"""
try:
bp = int(bp)
except (TypeError, ValueError, UnicodeDecodeError):
return avoid_wrapping("0 bp")
def bp_number_format(value):
return formats.number_format(round(value, 1), 1)
kbp = 1 << 10
mbp = 1 << 20
gbp = 1 << 30
tbp = 1 << 40
pbp = 1 << 50
negative = bp < 0
if negative:
bp = -bp # Allow formatting of negative numbers.
if bp < kbp:
value = "%(size)d byte" % {"size": bp}
elif bp < mbp:
value = "%s Kbp" % bp_number_format(bp / kbp)
elif bp < gbp:
value = "%s Mbp" % bp_number_format(bp / mbp)
elif bp < tbp:
value = "%s Gbp" % bp_number_format(bp / gbp)
elif bp < pbp:
value = "%s Tbp" % bp_number_format(bp / tbp)
else:
value = "%s Pbp" % bp_number_format(bp / bp)
if negative:
value = "-%s" % value
return avoid_wrapping(value) | 4c2b587b3aecd4dd287f7f04b3860f63440154a1 | 14,625 |
def get_module_id_from_event(event):
"""
Helper function to get the module_id from an EventHub message
"""
if "iothub-connection-module_id" in event.message.annotations:
return event.message.annotations["iothub-connection-module-id".encode()].decode()
else:
return None | e183824fff183e3f95ef35c623b13245eb68a8b7 | 14,626 |
def pipe_literal_representer(dumper, data):
"""Create a representer for pipe literals, used internally for pyyaml."""
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|') | b73e7d451ae50bc4638d3cb45546f2a197765717 | 14,627 |
# Keras imports assumed; the original snippet did not include them and may
# equally have used tensorflow.keras.
from keras.models import Sequential
from keras.layers import (BatchNormalization, Conv2D, Dense, Dropout,
                          Flatten, MaxPooling2D)


def RecognitionNeuralNetworkModelSmall(ih, iw, ic, nl):
"""
A simple model used to test the machinery on TrainSmall2.
ih, iw, ic - describe the dimensions of the input image
    nl - the number of output classes
"""
dropout = 0.1
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=(ih, iw, ic)))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(dropout))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(dropout))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(dropout))
model.add(Flatten())
model.add(Dense(32, activation="relu"))
#model.add(Dropout(0.5))
    model.add(Dense(nl, activation="softmax"))
model.compile(loss="categorical_crossentropy",
optimizer="adadelta",
metrics=["accuracy"])
print("\n ---> Model summary <--- \n")
model.summary()
return model | e4ec0ccb958eb9b406c079aeb9526e5adc1f6978 | 14,628 |
def _quick_rec_str(rec):
"""try to print an identifiable description of a record"""
if rec['tickets']:
return "[tickets: %s]" % ", ".join(rec["tickets"])
else:
return "%s..." % rec["raw_text"][0:25] | e666198de84fe9455ad2cee59f8ed85144589be0 | 14,629 |
from typing import Collection
def A006577(start: int = 0, limit: int = 20) -> Collection[int]:
"""Number of halving and tripling steps to reach 1 in '3x+1' problem,
or -1 if 1 is never reached.
"""
def steps(n: int) -> int:
if n == 1:
return 0
x = 0
while True:
if n % 2 == 0:
n //= 2
else:
n = 3 * n + 1
x += 1
if n < 2:
break
return x
return [steps(n) for n in range(start, start + limit)] | 47829838af8e2fdb191fdefa755e728db9c09559 | 14,630 |
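# First few values of A006577 starting at n = 1 (number of Collatz steps):
print(A006577(start=1, limit=10))  # [0, 1, 7, 2, 5, 8, 16, 3, 19, 6]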
# ifilter and imap come from Python 2's itertools; this snippet predates Python 3.
from itertools import ifilter, imap


def split_to_sentences_per_pages(text):
""" splitting pdfminer outputted text into list of pages and cleanup
paragraphs"""
def split_into_sentences(line):
"""cleanup paragraphs"""
return ifilter(None, (i.strip() for i in line.split('\n\n')))
return ifilter(None, imap(split_into_sentences, text.split('\x0c'))) | b40ac7d6b4f3e9482897934999858271aeaf9494 | 14,631 |
from bson import ObjectId
from flask import g


def lookup(_id=None, article_id=None, user_id=None, mult=False):
"""
Lookup a reaction in our g.db
"""
query = {}
if article_id:
query["article_id"] = ObjectId(article_id)
if user_id:
query["user_id"] = ObjectId(user_id)
if _id:
query["_id"] = ObjectId(_id)
if mult:
return g.db.reactions.find(query)
else:
return g.db.reactions.find_one(query) | 5d3c064278da8419e6305508a3bf47bba60c818c | 14,632 |
import pymongo


def connection(user='m001-student', password='m001-mongodb-basics'):
"""connection: This function connects mongoDB to get MongoClient
Args:
user (str, optional): It's user's value for URL ATLAS srv. Defaults to 'm001-student'.
password (str, optional): It's password's value for URL ATLAS srv. Defaults to 'm001-mongodb-basics'.
Returns:
object: Returns a MongoClient object
"""
try:
MONGO_URL_ATLAS = f'mongodb+srv://{user}:{password}@sandbox.dec55.mongodb.net/?retryWrites=true&w=majority'
mongo = pymongo.MongoClient(MONGO_URL_ATLAS, tlsAllowInvalidCertificates=False)
except pymongo.errors.ConnectionFailure as conn_error:
print("ERROR - Cannot connect to DataBase", conn_error)
else:
print('Correct Connection!!')
return mongo | 0714ffa01aa21dd71d6eefcadb0ebc2379cd3e6f | 14,633 |
import asyncio
import random

import discord
async def get_selection(ctx, choices, delete=True, pm=False, message=None, force_select=False):
"""Returns the selected choice, or None. Choices should be a list of two-tuples of (name, choice).
If delete is True, will delete the selection message and the response.
If length of choices is 1, will return the only choice unless force_select is True.
    :raises NoSelectionElements: if len(choices) is 0.
    :raises SelectionCancelled: if selection is cancelled."""
if len(choices) == 0:
raise NoSelectionElements()
elif len(choices) == 1 and not force_select:
return choices[0][1]
page = 0
pages = paginate(choices, 10)
m = None
selectMsg = None
def chk(msg):
valid = [str(v) for v in range(1, len(choices) + 1)] + ["c", "n", "p"]
return msg.author == ctx.author and msg.channel == ctx.channel and msg.content.lower() in valid
for n in range(200):
_choices = pages[page]
names = [o[0] for o in _choices if o]
embed = discord.Embed()
embed.title = "Multiple Matches Found"
selectStr = "Which one were you looking for? (Type the number or \"c\" to cancel)\n"
if len(pages) > 1:
selectStr += "`n` to go to the next page, or `p` for previous\n"
embed.set_footer(text=f"Page {page + 1}/{len(pages)}")
for i, r in enumerate(names):
selectStr += f"**[{i + 1 + page * 10}]** - {r}\n"
embed.description = selectStr
embed.colour = random.randint(0, 0xffffff)
if message:
embed.add_field(name="Note", value=message, inline=False)
if selectMsg:
try:
await selectMsg.delete()
except:
pass
if not pm:
selectMsg = await ctx.channel.send(embed=embed)
else:
embed.add_field(name="Instructions",
value="Type your response in the channel you called the command. This message was PMed to "
"you to hide the monster name.", inline=False)
selectMsg = await ctx.author.send(embed=embed)
try:
m = await ctx.bot.wait_for('message', timeout=30, check=chk)
except asyncio.TimeoutError:
m = None
if m is None:
break
if m.content.lower() == 'n':
if page + 1 < len(pages):
page += 1
else:
await ctx.channel.send("You are already on the last page.")
elif m.content.lower() == 'p':
if page - 1 >= 0:
page -= 1
else:
await ctx.channel.send("You are already on the first page.")
else:
break
if delete and not pm:
try:
await selectMsg.delete()
await m.delete()
except:
pass
if m is None or m.content.lower() == "c":
raise SelectionCancelled()
return choices[int(m.content) - 1][1] | 663f60c73bc6c1e3d7db5992b6dbb6d6953d0e24 | 14,634 |
import tensorflow as tf


def total_variation(images, name=None):
"""Calculate and return the total variation for one or more images.
(A mirror to tf.image total_variation)
The total variation is the sum of the absolute differences for neighboring
pixel-values in the input images. This measures how much noise is in the
images.
This can be used as a loss-function during optimization so as to suppress
noise in images. If you have a batch of images, then you should calculate
the scalar loss-value as the sum:
`loss = tf.reduce_sum(tf.image.total_variation(images))`
This implements the anisotropic 2-D version of the formula described here:
https://en.wikipedia.org/wiki/Total_variation_denoising
Args:
images: 4-D Tensor of shape `[batch, height, width, channels]` or
3-D Tensor of shape `[height, width, channels]`.
name: A name for the operation (optional).
Raises:
ValueError: if images.shape is not a 3-D or 4-D vector.
Returns:
The total variation of `images`.
If `images` was 4-D, return a 1-D float Tensor of shape `[batch]` with the
total variation for each image in the batch.
If `images` was 3-D, return a scalar float with the total variation for
that image.
"""
return tf.image.total_variation(images=images, name=name) | c12e822cd09ff6ea5f9bbc45ffa71121de5ff3e7 | 14,636 |
def get_vivareal_data(driver_path: str, address: str, driver_options: Options = None) -> list:
"""
Scrapes vivareal site and build a array of maps in the following format:
[
{
"preço": int,
"valor_de_condominio": int,
"banheiros": int,
"quartos": int,
"área": int,
"vagas": int,
"endereço": str
"texto": str
},
...
]
:param address: Address to search for
:param driver_options: driver options
:return: json like string
"""
# Initialize browser
chrome = init_driver(driver_path, driver_options)
chrome.get(SITE)
# Collect data
try:
accept_cookies(chrome)
select_rent_option(chrome)
send_address(chrome, address)
real_state_elements = collect_real_state_raw_data(chrome)
real_state_parsed_data = collect_elements_data(real_state_elements, chrome)
except Exception as e:
print(e)
real_state_parsed_data = None
finally:
chrome.close()
return real_state_parsed_data | e3495c05f39e7cb301fa90e62b5a398a69658e74 | 14,637 |
import logging
from io import BytesIO
from zipfile import ZipFile

from PIL import Image, UnidentifiedImageError
def extract_image(data):
"""Tries and extracts the image inside data (which is a zipfile)"""
with ZipFile(BytesIO(data)) as zip_file:
for name in zip_file.namelist()[::-1]:
try:
return Image.open(BytesIO(zip_file.read(name)))
except UnidentifiedImageError:
logging.warning("%s does not seem to be an image", name) | 2aa333d493a1a3ce637fb2d42bca85bbbb089728 | 14,638 |
import six
def construct_getatt(node):
"""
Reconstruct !GetAtt into a list
"""
if isinstance(node.value, (six.text_type, six.string_types)):
return node.value.split(".")
elif isinstance(node.value, list):
return [s.value for s in node.value]
else:
raise ValueError("Unexpected node type: {}".format(type(node.value))) | 657b957a06c79905b557dd397efea2c598d8c6b3 | 14,639 |
def rss(x, y, w, b):
"""residual sum of squares for linear regression
"""
return sum((yi-(xi*wi+b))**2 for xi, yi, wi in zip(x,y, w)) | 955e0b5e3dcf8373fe5ef1b95244d06abe512084 | 14,641 |
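# Example usage of rss above with per-sample weights:
x = [1.0, 2.0, 3.0]
y = [2.1, 4.2, 5.9]
w = [2.0, 2.0, 2.0]
print(rss(x, y, w, b=0.0))  # 0.06, the sum of squared residuals for y ≈ 2*x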
def get_index(lang, index):
"""
Given an integer index this function will return the proper string
version of the index based on the language and other considerations
Parameters
----------
lang : str
One of the supported languages
index : int
Returns
-------
str
The string corresponding to the correct index to be formatted into the code
"""
if lang in ['fortran', 'matlab']:
return str(index + 1)
if lang in ['c', 'cuda']:
return str(index) | bcb3a88857b13eea95d5a1bb939c9c4e175ea677 | 14,642 |
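# Example usage of get_index above: Fortran and MATLAB are 1-indexed,
# while C and CUDA are 0-indexed.
print(get_index('fortran', 0))  # '1'
print(get_index('c', 0))        # '0'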
def sampleFunction(x: int, y: float) -> float:
"""
Multiply int and float sample.
:param x: x value
:type x: int
:param y: y value
:type y: float
:return: result
    :rtype: float
"""
return x * y | f70708b3ece2574969834a62841da3e4506f704b | 14,643 |
import numpy as np


def n_elements_unique_intersection_np_axis_0(a: np.ndarray, b: np.ndarray) -> int:
"""
A lot faster than to calculate the real intersection:
Example with small numbers:
a = [1, 4, 2, 13] # len = 4
b = [1, 4, 9, 12, 25] # (len = 5)
# a, b need to be unique!!!
unique(concat(a, b)) = [1, 4, 2, 13, 9, 12, 25] # (len = 7)
intersect(a, b) = [1, 4] # (len = 2) to expensive to call
        # Formula (fast to calculate)
len(intersect(a, b)) = len(b) - n_elements_in_b_and_not_in_a
len(intersect(a, b)) = len(b) - (len(unique(concat(a, b))) - len(a))
"""
a = np.unique(a, axis=0)
b = np.unique(b, axis=0)
return len(b) - (len(np.unique(np.concatenate((a, b), axis=0), axis=0)) - len(a)) | ce8e3cfd158205a0fa2c5f1d10622c6901bc3224 | 14,644 |
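# Quick check of n_elements_unique_intersection_np_axis_0 with the numbers
# from the docstring above:
a = np.array([1, 4, 2, 13])
b = np.array([1, 4, 9, 12, 25])
print(n_elements_unique_intersection_np_axis_0(a, b))  # 2, i.e. len([1, 4])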
import logging
def Setup(test_options):
"""Runs uiautomator tests on connected device(s).
Args:
test_options: A UIAutomatorOptions object.
Returns:
A tuple of (TestRunnerFactory, tests).
"""
test_pkg = test_package.TestPackage(test_options.uiautomator_jar,
test_options.uiautomator_info_jar)
tests = test_pkg.GetAllMatchingTests(test_options.annotations,
test_options.exclude_annotations,
test_options.test_filter)
if not tests:
logging.error('No uiautomator tests to run with current args.')
def TestRunnerFactory(device, shard_index):
return test_runner.TestRunner(
test_options, device, shard_index, test_pkg)
return (TestRunnerFactory, tests) | 2d50c53d211bbddae495a89687cf0cf95b08b1ba | 14,645 |
import vdj


def barcode_junction_counts(inhandle):
"""Return count dict from vdjxml file with counts[barcode][junction]"""
counts = dict()
for chain in vdj.parse_VDJXML(inhandle):
try: # chain may not have barcode
counts_barcode = counts.setdefault(chain.barcode,dict())
except AttributeError:
continue
counts_barcode[chain.junction] = counts_barcode.get(chain.junction,0) + 1
return counts | 5cc29e44e34989fbd2afb4a2d34f63c7e7adf160 | 14,646 |
def is_following(user, actor):
"""
retorna True si el usuario esta siguiendo al actor
::
{% if request.user|is_following:another_user %}
You are already following {{ another_user }}
{% endif %}
"""
return Follow.objects.is_following(user, actor) | 963ccc2f75f19609943aba6b61a7522573665033 | 14,647 |
from typing import Union
def rf_make_ones_tile(num_cols: int, num_rows: int, cell_type: Union[str, CellType] = CellType.float64()) -> Column:
"""Create column of constant tiles of one"""
jfcn = RFContext.active().lookup('rf_make_ones_tile')
return Column(jfcn(num_cols, num_rows, _parse_cell_type(cell_type))) | 8ed63c974613e0451a3d8c78eac964c93c6f8154 | 14,648 |
def get_block_hash_from_height(height):
"""
Request a block hash by specifying the height
:param str height: a bitcoin block height
    :return: a bitcoin block hash
"""
resource = f'block-height/{height}'
return call_api(resource) | 877f4c4268cb3c7c36bd530a38d4b32abbedcaf4 | 14,649 |
from typing import Tuple
from typing import Set
from typing import List
def analyze_json(
snippet_data_json: str,
root_dir: str
) -> Tuple[Set[str], Set[str], Set[str], List[pdd.PolyglotDriftData]]:
"""Perform language-agnostic AST analysis on a directory
This function processes a given directory's language-specific
analysis (stored in a polyglot_snippet_data.json file) into a
list of automatically detected snippets. It then augments the
automatic detection results with useful manual data (specified
in .drift-data.yml files). Finally, it repackages all this data
into a tuple containing 4 useful lists of data as shown in the
'returns' section.
Arguments:
snippet_data_json: A path to a polyglot_snippet_data.json
file generated for the specified root_dir
root_dir: The root directory to perform AST analysis on
Returns:
A tuple containing the following:
- A list of tags found (via grep/text search)
within the given directory and its subdirectories
- A list of tags detected (by the AST parser)
within the given directory and its subdirectories
- A list of tags that the AST parser detected,
but chose to ignore (due to constants or user
specification in .drift-data.yml files)
- A list of snippet objects (as typed NamedTuples)
detected by the AST parser in the given directory
and its subdirectories
"""
tuple_methods, test_method_map = _get_data(snippet_data_json)
source_filepaths = set(method.source_path for method in tuple_methods)
grep_tags: Set[str] = set()
ignored_tags: Set[str] = set()
for source_file in source_filepaths:
grep_tag_names, ignored_tag_names = (
_process_file_region_tags(
source_file, snippet_data_json, tuple_methods))
grep_tags = grep_tags.union(grep_tag_names)
ignored_tags = ignored_tags.union(ignored_tag_names)
source_methods = [method for method in tuple_methods
if method.region_tags or
method.name in constants.SNIPPET_INVOCATION_METHODS]
source_methods = _dedupe_source_methods(source_methods)
_store_tests_on_methods(source_methods, test_method_map)
polyglot_parser.add_children_drift_data(source_methods)
yaml_utils.add_yaml_data_to_source_methods(source_methods, root_dir)
source_tags: Set[str] = set()
for method in source_methods:
source_tags = source_tags.union(set(method.region_tags))
# Remove automatically ignored region tags from region tag lists
grep_tags = set(tag for tag in grep_tags
if tag not in ignored_tags)
source_tags = set(tag for tag in source_tags
if tag not in ignored_tags)
# Add manually ignored (via yaml) tags to ignored tags list
# These should *not* overlap w/ source_tags, but we
# check that in validate_yaml_syntax - *not here!*
ignored_tags = ignored_tags.union(
yaml_utils.get_untested_region_tags(root_dir))
return grep_tags, source_tags, ignored_tags, source_methods | 9129b1fad5172f9b7054ba9b4e64cc4ece5ab09c | 14,650 |