repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
taylorhxu/neurosynth | neurosynth/analysis/reduce.py | 1 | 5689 | """ Dimensionality reduction methods"""
import numpy as np
from neurosynth.base.dataset import Dataset
from neurosynth.base import imageutils
import logging
logger = logging.getLogger('neurosynth.cluster')
def average_within_regions(dataset, regions, threshold=None, remove_zero=True):
""" Aggregates over all voxels within each ROI in the input image.
Takes a Dataset and a Nifti image that defines distinct regions, and
returns a numpy matrix of ROIs x mappables, where the value at each
ROI is the proportion of active voxels in that ROI. Each distinct ROI
must have a unique value in the image; non-contiguous voxels with the
same value will be assigned to the same ROI.
Args:
dataset: Either a Dataset instance from which image data are
extracted, or a Numpy array containing image data to use. If
the latter, the array contains voxels in rows and
features/studies in columns. The number of voxels must be equal
to the length of the vectorized image mask in the regions
image.
regions: An image defining the boundaries of the regions to use.
Can be one of:
1) A string name of the NIFTI or Analyze-format image
2) A NiBabel SpatialImage
3) A list of NiBabel images
4) A 1D numpy array of the same length as the mask vector in
the Dataset's current Masker.
threshold: An optional float in the range of 0 - 1 or integer. If
passed, the array will be binarized, with ROI values above the
threshold assigned to True and values below the threshold
assigned to False. (E.g., if threshold = 0.05, only ROIs in
which more than 5% of voxels are active will be considered
active.) If threshold is integer, studies will only be
considered active if they activate more than that number of
voxels in the ROI.
remove_zero: An optional boolean; when True, assume that voxels
with value of 0 should not be considered as a separate ROI, and
will be ignored.
Returns:
A 2D numpy array with ROIs in rows and mappables in columns.
"""
if not type(regions).__module__.startswith('numpy'):
regions = dataset.masker.mask(regions)
if isinstance(dataset, Dataset):
dataset = dataset.get_image_data(dense=False)
# If multiple images are passed, give each one a unique value
if regions.ndim == 2:
m = regions
for i in range(regions.shape[1]):
_nz = np.nonzero(m[:, i])[0]
if isinstance(threshold, int):
m[_nz, i] = 1.0
else:
m[_nz, i] = 1.0 / np.count_nonzero(m[:, i])
# Otherwise create an ROI-coding matrix
else:
labels = np.unique(regions)
if remove_zero:
labels = labels[np.nonzero(labels)]
n_regions = labels.size
m = np.zeros((regions.size, n_regions))
for i in range(n_regions):
if isinstance(threshold, int):
m[regions == labels[i], i] = 1.0
else:
m[regions == labels[i], i] = 1.0 / \
np.sum(regions == labels[i])
# Call dot() on the array itself as this will use sparse matrix
# multiplication if possible.
result = dataset.T.dot(m).T
if threshold is not None:
result[result < threshold] = 0.0
result = result.astype(bool)
return result
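# Illustrative usage sketch (editorial addition, not part of the original
# neurosynth source; the database and atlas filenames below are hypothetical):
#
#   ds = Dataset('database.txt')
#   roi_data = average_within_regions(ds, 'atlas_labels.nii.gz', threshold=0.05)
#   # roi_data has one row per ROI and one column per study/mappable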
def apply_grid(dataset, masker=None, scale=5, threshold=None):
""" Imposes a 3D grid on the brain volume and averages across all voxels
that fall within each cell.
Args:
dataset: Data to apply grid to. Either a Dataset instance, or a numpy
array with voxels in rows and features in columns.
masker: Optional Masker instance used to map between the created grid
and the dataset. This is only needed if dataset is a numpy array;
if dataset is a Dataset instance, the Masker in the dataset will
be used.
scale: int; scaling factor (in mm) to pass onto create_grid().
threshold: Optional float to pass to reduce.average_within_regions().
Returns:
A tuple of length 2, where the first element is a numpy array of
dimensions n_cubes x n_studies, and the second element is a numpy
array, with the same dimensions as the Masker instance in the current
Dataset, that maps voxel identities onto cell IDs in the grid.
"""
if masker is None:
if isinstance(dataset, Dataset):
masker = dataset.masker
else:
raise ValueError(
"If dataset is a numpy array, a masker must be provided.")
grid = imageutils.create_grid(masker.volume, scale)
cm = masker.mask(grid, in_global_mask=True)
data = average_within_regions(dataset, cm, threshold)
return (data, grid)
def get_random_voxels(dataset, n_voxels):
""" Returns mappable data for a random subset of voxels.
May be useful as a baseline in predictive analyses--e.g., to compare
performance of a more principled feature selection method with simple
random selection.
Args:
dataset: A Dataset instance
n_voxels: An integer specifying the number of random voxels to select.
Returns:
A 2D numpy array with (randomly-selected) voxels in rows and mappables
in columns.
"""
voxels = np.arange(dataset.masker.n_vox_in_vol)
np.random.shuffle(voxels)
selected = voxels[0:n_voxels]
return dataset.get_image_data(voxels=selected)
| mit | -2,123,809,506,534,848,000 | 39.06338 | 79 | 0.642819 | false |
biocore/qiime | qiime/parse.py | 6 | 32670 | #!/usr/bin/env python
# file parse.py: parsers for map file, distance matrix file, env file
__author__ = "Rob Knight"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Rob Knight", "Daniel McDonald", "Greg Caporaso",
"Justin Kuczynski", "Cathy Lozupone", "Jens Reeder",
"Antonio Gonzalez Pena", "Jai Ram Rideout", "Will Van Treuren",
"Yoshiki Vazquez-Baeza", "Jose Antonio Navas Molina"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "[email protected]"
from string import strip
from collections import defaultdict
import os
from os.path import expandvars
import re
from types import GeneratorType
from numpy import concatenate, repeat, zeros, nan, asarray
from numpy.random import permutation
from skbio.stats.ordination import OrdinationResults
from skbio.parse.record_finder import LabeledRecordFinder
from cogent.parse.tree import DndParser
from skbio.parse.sequences import parse_fastq
from skbio.parse.sequences.fasta import FastaFinder
from skbio.sequence import DNA
from skbio.io.util import open_file
from cogent.core.tree import PhyloNode
def is_casava_v180_or_later(header_line):
""" True if this file is generated by Illumina software post-casava 1.8 """
assert header_line.startswith('@'),\
"Bad fastq file passed as input. Header line must start with '@'."
fields = header_line.split(':')
if len(fields) == 10 and fields[7] in 'YN':
return True
return False
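# Hedged example (editorial addition): a typical post-CASAVA-1.8 header has ten
# colon-separated fields with a Y/N filter flag in the eighth field, e.g.
#
#   is_casava_v180_or_later('@M00176:17:000000000-A0CNA:1:1:15487:1773 1:N:0:0')
#   # -> True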
def MinimalSamParser(data):
for line in data:
line = line.strip()
if not line or line.startswith('@'):
continue
else:
yield line.strip().split('\t')
class QiimeParseError(Exception):
pass
class IlluminaParseError(QiimeParseError):
pass
def parse_newick(lines, constructor=PhyloNode):
"""Return PhyloNode from newick file handle stripping quotes from tip names
This function wraps cogent.parse.tree.DndParser stripping
matched leading/trailing single quotes from tip names, and returning
a PhyloNode object by default (alternate constructor can be passed
with constructor=).
Stripping of quotes is essential for many applications in Qiime, as
the tip names are frequently matched to OTU ids, and if the tip name
is read in with leading/trailing quotes, node.Name won't match to the
corresponding OTU identifier. Disaster follows.
"""
return DndParser(lines, constructor=constructor, unescape_name=True)
def parse_mapping_file(lines, strip_quotes=True, suppress_stripping=False):
"""Parser for map file that relates samples to metadata.
Format: header line with fields
optionally other comment lines starting with #
tab-delimited fields
Result: list of lists of fields, incl. headers.
"""
if hasattr(lines, "upper"):
# Try opening if a string was passed
try:
lines = open(lines, 'U')
except IOError:
raise QiimeParseError("A string was passed that doesn't refer "
"to an accessible filepath.")
if strip_quotes:
if suppress_stripping:
# remove quotes but not spaces
strip_f = lambda x: x.replace('"', '')
else:
# remove quotes and spaces
strip_f = lambda x: x.replace('"', '').strip()
else:
if suppress_stripping:
# don't remove quotes or spaces
strip_f = lambda x: x
else:
# remove spaces but not quotes
strip_f = lambda x: x.strip()
# Create lists to store the results
mapping_data = []
header = []
comments = []
# Begin iterating over lines
for line in lines:
line = strip_f(line)
if not line or (suppress_stripping and not line.strip()):
# skip blank lines when not stripping lines
continue
if line.startswith('#'):
line = line[1:]
if not header:
header = line.strip().split('\t')
else:
comments.append(line)
else:
# Will add empty string to empty fields
tmp_line = map(strip_f, line.split('\t'))
if len(tmp_line) < len(header):
tmp_line.extend([''] * (len(header) - len(tmp_line)))
mapping_data.append(tmp_line)
if not header:
raise QiimeParseError("No header line was found in mapping file.")
if not mapping_data:
raise QiimeParseError("No data found in mapping file.")
return mapping_data, header, comments
def parse_mapping_file_to_dict(*args, **kwargs):
"""Parser for map file that relates samples to metadata.
input format: header line with fields
optionally other comment lines starting with #
tab-delimited fields
calls parse_mapping_file, then processes the result into a 2d dict, assuming
the first field is the sample id
e.g.: {'sample1':{'age':'3','sex':'male'},'sample2':...
returns the dict, and a list of comment lines
"""
mapping_data, header, comments = parse_mapping_file(*args, **kwargs)
return mapping_file_to_dict(mapping_data, header), comments
def mapping_file_to_dict(mapping_data, header):
"""processes mapping data in list of lists format into a 2 deep dict"""
map_dict = {}
for i in range(len(mapping_data)):
sam = mapping_data[i]
map_dict[sam[0]] = {}
for j in range(len(header)):
if j == 0:
continue # sampleID field
map_dict[sam[0]][header[j]] = sam[j]
return map_dict
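# Illustrative sketch (editorial addition; sample IDs and categories are made up):
#
#   header = ['SampleID', 'BarcodeSequence', 'Treatment']
#   data = [['PC.354', 'AGCACGAGCCTA', 'Control'],
#           ['PC.607', 'AACTGTGCGTAC', 'Fast']]
#   mapping_file_to_dict(data, header)
#   # -> {'PC.354': {'BarcodeSequence': 'AGCACGAGCCTA', 'Treatment': 'Control'},
#   #     'PC.607': {'BarcodeSequence': 'AACTGTGCGTAC', 'Treatment': 'Fast'}}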
def parse_prefs_file(prefs_string):
"""Returns prefs dict evaluated from prefs_string.
prefs_string: read buffer from prefs file or string containing prefs
dict. Must be able to be evaluated as a dict using eval.
"""
try:
prefs = dict(eval(prefs_string))
except TypeError:
raise QiimeParseError(
"Invalid prefs file. Prefs file must contain a valid prefs dictionary.")
return prefs
def group_by_field(table, name):
"""Returns dict of field_state:[row_headers] from table.
Use to extract info from table based on a single field.
"""
try:
col_index = table[0].index(name)
except ValueError as e:
raise ValueError("Couldn't find name %s in headers: %s" %
(name, table[0]))
result = defaultdict(list)
for row in table[1:]:
header, state = row[0], row[col_index]
result[state].append(header)
return result
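# Illustrative sketch (editorial addition; sample IDs and states are made up):
#
#   table = [['#SampleID', 'Treatment'],
#            ['PC.354', 'Control'],
#            ['PC.355', 'Control'],
#            ['PC.607', 'Fast']]
#   group_by_field(table, 'Treatment')
#   # -> {'Control': ['PC.354', 'PC.355'], 'Fast': ['PC.607']} (as a defaultdict)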
def group_by_fields(table, names):
"""Returns dict of (field_states):[row_headers] from table.
Use to extract info from table based on combinations of fields.
"""
col_indices = map(table[0].index, names)
result = defaultdict(list)
for row in table[1:]:
header = row[0]
states = tuple([row[i] for i in col_indices])
result[states].append(header)
return result
def parse_distmat(lines):
"""Parser for distance matrix file (e.g. UniFrac dist matrix).
The examples I have of this file are just sample x sample tab-delimited
text, so easiest way to handle is just to convert into a numpy array
plus a list of field names.
"""
header = None
result = []
for line in lines:
if line[0] == '\t': # is header
header = map(strip, line.split('\t')[1:])
else:
result.append(map(float, line.split('\t')[1:]))
return header, asarray(result)
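# Illustrative sketch (editorial addition): the expected input is a tab-delimited
# square matrix whose header line starts with a tab, e.g.
#
#   lines = ['\tA\tB\n', 'A\t0.0\t0.5\n', 'B\t0.5\t0.0\n']
#   header, dists = parse_distmat(lines)
#   # header -> ['A', 'B'];  dists -> array([[0., 0.5], [0.5, 0.]])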
def parse_matrix(lines):
"""Parser for a matrix file Tab delimited. skips first lines if led
by '#', assumes column headers line starts with a tab
"""
col_headers = None
result = []
row_headers = []
for line in lines:
if line[0] == '#':
continue
if line[0] == '\t': # is header
col_headers = map(strip, line.split('\t')[1:])
else:
entries = line.split('\t')
result.append(map(float, entries[1:]))
row_headers.append(entries[0])
return col_headers, row_headers, asarray(result)
def parse_distmat_to_dict(table):
"""Parse a dist matrix into an 2d dict indexed by sample ids.
table: table as lines
"""
col_headers, row_headers, data = parse_matrix(table)
assert(col_headers == row_headers)
result = defaultdict(dict)
for (sample_id_x, row) in zip(col_headers, data):
for (sample_id_y, value) in zip(row_headers, row):
result[sample_id_x][sample_id_y] = value
return result
def parse_bootstrap_support(lines):
"""Parser for a bootstrap/jackknife support in tab delimited text
"""
bootstraps = {}
for line in lines:
if line[0] == '#':
continue
wordlist = line.strip().split()
bootstraps[wordlist[0]] = float(wordlist[1])
return bootstraps
def parse_rarefaction_data(lines):
data = {}
data['headers'] = []
data['options'] = []
data['xaxis'] = []
data['series'] = {}
data['error'] = {}
data['color'] = {}
for l in lines:
if l.startswith('#'):
data['headers'].append(l.strip('#').strip())
continue
if l.startswith('xaxis'):
data['xaxis'] = [float(v) for v in l[6:].strip().split('\t')]
continue
if l.startswith('>>'):
data['options'].append(l.strip('>').strip())
continue
if l.startswith('series'):
data['series'][data['options'][len(data['options']) - 1]] = \
[float(v) for v in l[7:].strip().split('\t')]
continue
if l.startswith('error'):
data['error'][data['options'][len(data['options']) - 1]] = \
[float(v) for v in l[6:].strip().split('\t')]
if l.startswith('color'):
data['color'][data['options'][len(data['options']) - 1]] = \
str(l[6:].strip())
if(len(str(l[6:].strip())) < 1):
print data['options'][len(data['options']) - 1]
return data
def parse_rarefaction_record(line):
""" Return (rarefaction_fn, [data])"""
def float_or_nan(v):
try:
return float(v)
except ValueError:
return nan
entries = line.split('\t')
return entries[0], map(float_or_nan, entries[1:])
def parse_rarefaction(lines):
"""Function for parsing rarefaction files specifically for use in
make_rarefaction_plots.py"""
col_headers = []
comments = []
rarefaction_data = []
rarefaction_fns = []
for line in lines:
if line[0] == '#':
# is comment
comments.append(line)
elif line[0] == '\t':
# is header
col_headers = map(strip, line.split('\t'))
else:
# is rarefaction record
rarefaction_fn, data = parse_rarefaction_record(line)
rarefaction_fns.append(rarefaction_fn)
rarefaction_data.append(data)
return col_headers, comments, rarefaction_fns, rarefaction_data
def parse_coords(lines):
"""Parse skbio's ordination results file into coords, labels, eigvals,
pct_explained.
Returns:
- list of sample labels in order
- array of coords (rows = samples, cols = axes in descending order)
- list of eigenvalues
- list of percent variance explained
For the file format check
skbio.stats.ordination.OrdinationResults.read
Strategy: read the file using skbio's parser and return the objects
we want
"""
pcoa_results = OrdinationResults.read(lines)
return (pcoa_results.site_ids, pcoa_results.site, pcoa_results.eigvals,
pcoa_results.proportion_explained)
def parse_rarefaction_fname(name_string):
"""returns base, seqs/sam, iteration, extension. seqs, iters as ints
all as strings, some may be empty strings ('')"""
root, ext = os.path.splitext(name_string)
root_list = root.split("_")
iters = int(root_list.pop())
seqs_per_sam = int(root_list.pop())
base_name = "_".join(root_list)
return base_name, seqs_per_sam, iters, ext
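# Illustrative sketch (editorial addition; the filename is hypothetical):
#
#   parse_rarefaction_fname('alpha_rarefaction_100_3.txt')
#   # -> ('alpha_rarefaction', 100, 3, '.txt')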
def parse_taxonomy(infile):
"""parse a taxonomy file.
Typically the lines in these files look like:
3 SAM1_32 \t Root;Bacteria;Fi... \t 0.9
where the first field is the sequence identifier, the second field is the
taxonomy assignment separated by ; characters, and the third field is a
quality score (e.g., confidence from the RDP classifier)
when using the BLAST taxonomy assigner, an additional field is included,
containing the sequence identifier of the best blast hit for each input
sequence. these lines might look like:
3 SAM1_32 \t Root;Bacteria;Fi... \t 1e-42 \t A1237756
Returns: dict of otu id to taxonomy name.
ignores other parts of the otu file, such as confidence and seq id (otu id
only)
"""
res = {}
for line in infile:
if not line or line.startswith('#'):
continue
line = line.rstrip("\n")
fields = line.split('\t')
otu = fields[0].split(' ')[0]
res[otu] = taxa_split(fields[1])
return res
parse_observation_metadata = parse_taxonomy
def taxa_split(taxa_string):
return [t.strip() for t in taxa_string.split(';')]
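# Illustrative sketch (editorial addition; the assignment line is made up):
#
#   lines = ['0\tRoot;Bacteria;Firmicutes\t0.98\n']
#   parse_taxonomy(lines)
#   # -> {'0': ['Root', 'Bacteria', 'Firmicutes']}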
def parse_taxonomy_to_otu_metadata(
lines, labels=['taxonomy', 'score'], process_fs=[taxa_split, float]):
""" Return a dict mapping otu identifier to dict of otu metadata
lines: file handle or list of lines - format should be:
otu_id <tab> metadata entry 1 <tab> metadata entry 2 <tab> ...
labels: list of labels for metadata entries to be used in the
internal dicts. each internal dict will have only as many entries
as there are labels (extra metadata entries in the input file
will be ignored)
process_fs: functions which are applied to each metadata entry -
if there are more process_fs than labels, the additional ones
will be ignored
"""
result = {}
for line in lines:
line = line.strip()
fields = line.split('\t')
id_ = fields[0].split()[0]
result[id_] = {}
for i, field in enumerate(fields[1:]):
try:
label = labels[i]
except IndexError:
continue
try:
value = process_fs[i](field)
except IndexError:
raise ValueError(
"Too few process functions provided (n=%d)." %
len(process_fs))
result[id_][label] = value
return result
def process_otu_table_sample_ids(sample_id_fields):
""" process the sample IDs line of an OTU table """
if len(sample_id_fields) == 0:
raise ValueError('Error parsing sample ID line in OTU table. Fields are %s'
% ' '.join(sample_id_fields))
# Detect if a metadata column is included as the last column. This
# field will be named either 'Consensus Lineage' or 'OTU Metadata',
# but we don't care about case or spaces.
last_column_header = sample_id_fields[-1].strip().replace(' ', '').lower()
if last_column_header in ['consensuslineage', 'otumetadata', 'taxonomy']:
has_metadata = True
sample_ids = sample_id_fields[:-1]
else:
has_metadata = False
sample_ids = sample_id_fields
# Return the list of sample IDs and boolean indicating if a metadata
# column is included.
return sample_ids, has_metadata
def parse_classic_otu_table(lines, count_map_f=int, remove_empty_rows=False):
"""parses a classic otu table (sample ID x OTU ID map)
Returns tuple: sample_ids, otu_ids, matrix of OTUs(rows) x samples(cols),
and lineages from infile.
"""
otu_table = []
otu_ids = []
metadata = []
sample_ids = []
# iterate over lines in the OTU table -- keep track of line number
# to support legacy (Qiime 1.2.0 and earlier) OTU tables
for i, line in enumerate(lines):
line = line.strip()
if line:
if (i == 1 or i == 0) and line.startswith('#OTU ID') and not sample_ids:
# we've got a legacy OTU table
try:
sample_ids, has_metadata = process_otu_table_sample_ids(
line.strip().split('\t')[1:])
except ValueError:
raise ValueError("Error parsing sample IDs in OTU table. Appears to be a" +
" legacy OTU table. Sample ID line:\n %s" % line)
elif not line.startswith('#'):
if not sample_ids:
# current line is the first non-space, non-comment line
# in OTU table, so contains the sample IDs
try:
sample_ids, has_metadata = process_otu_table_sample_ids(
line.strip().split('\t')[1:])
except ValueError:
raise ValueError("Error parsing sample IDs in OTU table." +
" Sample ID line:\n %s" % line)
else:
# current line is OTU line in OTU table
fields = line.split('\t')
if has_metadata:
# if there is OTU metadata the last column gets appended
# to the metadata list
# added in a try/except to handle OTU tables containing
# floating numbers
try:
valid_fields = asarray(
fields[1:-1],
dtype=count_map_f)
except ValueError:
valid_fields = asarray(fields[1:-1], dtype=float)
# validate that there are no empty rows
if remove_empty_rows and (valid_fields >= 0).all() and \
sum(valid_fields) == 0.0:
continue
metadata.append(map(strip, fields[-1].split(';')))
else:
# otherwise all columns are appended to otu_table
# added in a try/except to handle OTU tables containing
# floating numbers
try:
valid_fields = asarray(
fields[1:],
dtype=count_map_f)
except ValueError:
valid_fields = asarray(fields[1:], dtype=float)
# validate that there are no empty rows
if remove_empty_rows and (valid_fields >= 0.0).all() and \
sum(valid_fields) == 0.0:
continue
otu_table.append(valid_fields)
# grab the OTU ID
otu_id = fields[0].strip()
otu_ids.append(otu_id)
return sample_ids, otu_ids, asarray(otu_table), metadata
parse_otu_table = parse_classic_otu_table
def parse_taxa_summary_table(lines):
result = parse_classic_otu_table(lines, count_map_f=float)
return result[0], result[1], result[2]
def make_envs_dict(abund_mtx, sample_names, taxon_names):
""" makes an envs dict suitable for unifrac from an abundance matrix
abund_mtx is samples (rows) by seqs (columns) numpy 2d array
sample_names is a list, length = num rows
taxon_names is a list, length = num columns
"""
num_samples, num_seqs = abund_mtx.shape
if (num_samples, num_seqs) != (len(sample_names), len(taxon_names)):
raise ValueError(
"Shape of matrix %s doesn't match # samples and # taxa (%s and %s)" %
(abund_mtx.shape, num_samples, num_seqs))
envs_dict = {}
sample_names = asarray(sample_names)
for i, taxon in enumerate(abund_mtx.T):
nonzeros = taxon.nonzero() # this removes zero values to reduce memory
envs_dict[taxon_names[i]] = dict(zip(sample_names[nonzeros],
taxon[nonzeros]))
return envs_dict
def fields_to_dict(lines, delim='\t', strip_f=strip):
"""makes a dict where first field is key, rest are vals."""
result = {}
for line in lines:
# skip empty lines
if strip_f:
fields = map(strip_f, line.split(delim))
else:
fields = line.split(delim)
if not fields[0]: # empty string in first field implies problem
continue
result[fields[0]] = fields[1:]
return result
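# Illustrative sketch (editorial addition; keys and values are made up):
#
#   fields_to_dict(['otu1\tseqA\tseqB\n', 'otu2\tseqC\n'])
#   # -> {'otu1': ['seqA', 'seqB'], 'otu2': ['seqC']}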
def parse_qiime_parameters(lines):
""" Return 2D dict of params (and values, if applicable) which should be on
"""
# The result object is a default dict: if keys are not
# present, {} is returned
result = defaultdict(dict)
for line in lines:
line = line.strip()
if line and not line.startswith('#'):
pound_pos = line.find('#')
# A pound sign only starts an inline comment if it is preceded by
# whitespace.
if pound_pos > 0 and line[pound_pos - 1].isspace():
line = line[:pound_pos].rstrip()
fields = line.split(None, 1)
script_id, parameter_id = fields[0].split(':')
try:
value = fields[1]
except IndexError:
continue
if value.upper() == 'FALSE' or value.upper() == 'NONE':
continue
elif value.upper() == 'TRUE':
value = None
else:
pass
result[script_id][parameter_id] = value
return result
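# Illustrative sketch (editorial addition; parameter names follow the
# script_name:parameter_name convention but the values are made up):
#
#   lines = ['pick_otus:similarity\t0.97\n', 'align_seqs:verbose\tTrue\n']
#   parse_qiime_parameters(lines)
#   # -> {'pick_otus': {'similarity': '0.97'}, 'align_seqs': {'verbose': None}}
#   # (a defaultdict; flag-style parameters set to True map to None)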
def parse_qiime_config_file(qiime_config_file):
""" Parse lines in a qiime_config file
"""
result = {}
for line in qiime_config_file:
line = line.strip()
# ignore blank lines or lines beginning with '#'
if not line or line.startswith('#'):
continue
fields = line.split()
param_id = fields[0]
param_value = expandvars(' '.join(fields[1:])) or None
result[param_id] = param_value
return result
def parse_qiime_config_files(qiime_config_files):
""" Parse files in (ordered!) list of qiime_config_files
The order of files must be least important to most important.
Values defined in earlier files will be overwritten if the same
values are defined in later files.
"""
# The qiime_config object is a default dict: if keys are not
# present, none is returned
def return_none():
return None
results = defaultdict(return_none)
for qiime_config_file in qiime_config_files:
try:
results.update(parse_qiime_config_file(qiime_config_file))
except IOError:
pass
return results
def parse_tmp_to_final_filepath_map_file(lines):
"""Parses poller maps of tmp -> final file names
For example, lines:
tmpA1.txt tmpA2.txt tmpA3.txt A.txt
B1.txt B2.txt B3.txt B.txt
Would result in:
([[tmpA1.txt,tmpA2.txt,tmpA3.txt], [B1.txt,B2.txt,B3.txt]],
[A.txt,B.txt])
"""
infiles_lists = []
out_filepaths = []
for line in lines:
fields = line.split()
infiles_lists.append(fields[:-1])
out_filepaths.append(fields[-1])
return infiles_lists, out_filepaths
def parse_metadata_state_descriptions(state_string):
"""From string in format 'col1:good1,good2;col2:good1' return dict."""
result = {}
state_string = state_string.strip()
if state_string:
cols = map(strip, state_string.split(';'))
for c in cols:
# split on the first colon to account for category names with
# colons
colname, vals = map(strip, c.split(':', 1))
vals = map(strip, vals.split(','))
result[colname] = set(vals)
return result
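# Illustrative sketch (editorial addition; column names and states are made up):
#
#   parse_metadata_state_descriptions('Treatment:Control,Fast;Sex:M')
#   # -> {'Treatment': set(['Control', 'Fast']), 'Sex': set(['M'])}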
def parse_illumina_line(l, barcode_length, rev_comp_barcode,
barcode_in_sequence=False):
"""Parses a single line of Illumina data
"""
fields = l.strip().split(':')
y_position_subfields = fields[4].split('#')
y_position = int(y_position_subfields[0])
sequence = fields[5]
qual_string = fields[6]
if barcode_in_sequence:
barcode = sequence[:barcode_length]
sequence = sequence[barcode_length:]
qual_string = qual_string[barcode_length:]
else:
barcode = y_position_subfields[1][:barcode_length]
if rev_comp_barcode:
barcode = str(DNA(barcode).rc())
result = {
'Full description': ':'.join(fields[:5]),
'Machine Name': fields[0],
'Channel Number': int(fields[1]),
'Tile Number': int(fields[2]),
'X Position': int(fields[3]),
'Y Position': y_position,
'Barcode': barcode,
'Full Y Position Field': fields[4],
'Sequence': sequence,
'Quality Score': qual_string}
return result
def parse_qual_score(infile, value_cast_f=int):
"""Load quality scores into dict."""
id_to_qual = dict([rec for rec in MinimalQualParser(infile, value_cast_f)])
return id_to_qual
def MinimalQualParser(infile, value_cast_f=int, full_header=False):
"""Yield quality scores"""
for rec in FastaFinder(infile):
curr_id = rec[0][1:]
curr_qual = ' '.join(rec[1:])
try:
parts = asarray(curr_qual.split(), dtype=value_cast_f)
except ValueError:
raise QiimeParseError(
"Invalid qual file. Check the format of the qual files.")
if full_header:
curr_pid = curr_id
else:
curr_pid = curr_id.split()[0]
yield (curr_pid, parts)
def parse_qual_scores(qual_files):
"""Load qual scores into dict of {id:qual_scores}.
No filtering is performed at this step.
"""
qual_mappings = {}
for qual_file in qual_files:
qual_mappings.update(parse_qual_score(qual_file))
return qual_mappings
def parse_trflp(lines):
"""Load a trflp file and returns a header and data lists"""
sample_ids = []
otu_ids = []
data = []
non_alphanum_mask = re.compile('[^\w|^\t]')
# not sure why the above regex doesn't cover the following regex...
dash_space_mask = re.compile('[_ -]')
for i, line in enumerate(lines):
elements = line.strip('\n').split('\t')
# special handling for the first line only
if i == 0:
# validating if the file has a header
if elements[0] == '':
for otu_id in elements[1:]:
otu_ids.append(non_alphanum_mask.sub('_', otu_id))
continue
else:
for j, otu_id in enumerate(elements[1:]):
otu_ids.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))
# handling of all other lines
current_row = []
# converting each value in the row to int
for count in elements[1:]:
try:
current_row.append(int(round(float(count), 0)))
except ValueError:
current_row.append(0)
# if the sum of all the values is equal to 0, ignore the line
if sum(current_row) == 0:
continue
# adding sample header to list
sample_ids.append(non_alphanum_mask.sub('.',
dash_space_mask.sub('.', elements[0])))
# validating the size of the headers to add missing columns
# this is only valid when there is no header
if len(current_row) > len(otu_ids):
# modify header data
extra_cols = []
for j in range(len(otu_ids), len(current_row)):
extra_cols.append(non_alphanum_mask.sub('_', 'Bin%3d' % j))
# modify data
for j in range(len(data)):
data[j].extend([0] * (len(current_row) - len(otu_ids)))
otu_ids.extend(extra_cols)
elif len(current_row) < len(otu_ids):
# modify data
current_row.extend([0] * (len(otu_ids) - len(current_row)))
data.append(current_row)
return sample_ids, otu_ids, asarray(data).transpose()
def parse_denoiser_mapping(denoiser_map):
""" read a denoiser mapping file into a dictionary """
result = {}
for line in denoiser_map:
line = line.strip().split('\t')
denoised_id = line[0].rstrip(':')
original_ids = [denoised_id] + line[1:]
if denoised_id in result:
# just a healthy dose of paranoia
raise ValueError("Duplicated identifiers in denoiser mapping file: "
"are you sure you merged the correct files?")
else:
result[denoised_id] = original_ids
return result
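# Illustrative sketch (editorial addition; identifiers are made up):
#
#   parse_denoiser_mapping(['Read1:\tRead4\tRead5\n', 'Read2:\n'])
#   # -> {'Read1': ['Read1', 'Read4', 'Read5'], 'Read2': ['Read2']}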
def parse_otu_map(otu_map_f, otu_ids_to_exclude=None, delim='_'):
""" parse otu map file into a sparse dict {(otu_idx,sample_idx):count}
This function is much more memory efficient than fields_to_dict
and the result dict is of the correct format to be passed to
table_factory for creating OtuTable objects.
"""
if otu_ids_to_exclude is None:
otu_ids_to_exclude = {}
result = defaultdict(int)
sample_ids = []
sample_id_idx = {}
otu_ids = []
otu_count = 0
sample_count = 0
for line in otu_map_f:
fields = line.strip().split('\t')
otu_id = fields[0]
if otu_id in otu_ids_to_exclude:
continue
for seq_id in fields[1:]:
sample_id = seq_id.split(delim)[0]
try:
sample_index = sample_id_idx[sample_id]
except KeyError:
sample_index = sample_count
sample_id_idx[sample_id] = sample_index
sample_count += 1
sample_ids.append(sample_id)
# {(row,col):val}
result[(otu_count, sample_index)] += 1
otu_count += 1
otu_ids.append(otu_id)
return result, sample_ids, otu_ids
def parse_sample_id_map(sample_id_map_f):
"""Parses the lines of a sample ID map file into a dictionary.
Returns a dictionary with original sample IDs as the keys and new sample
IDs as the values.
This function only allows a sample ID map to perform one-to-one mappings
between sample IDs (e.g. S1 and T1 point to new ID 'a', but a third
original ID, such as S2, cannot also point to 'a').
Arguments:
sample_id_map_f - the lines of a sample ID map file to parse. Each line
should contain two sample IDs separated by a tab. Each value in the
first column must be unique, since the returned data structure is a
dictionary using those values as keys
"""
result = {}
new_samp_id_counts = defaultdict(int)
for line in sample_id_map_f:
# Only try to parse lines that aren't just whitespace.
line = line.strip()
if line:
samp_id, mapped_id = line.split('\t')
if samp_id in result:
raise ValueError("The first column of the sample ID map must "
"contain unique sample IDs ('%s' is "
"repeated). The second column, however, may "
"contain repeats." % samp_id)
elif new_samp_id_counts[mapped_id] >= 2:
raise ValueError("Only two original sample IDs may map to the "
"same new sample ID. The new sample ID '%s' "
"has more than two sample IDs mapping to it."
% mapped_id)
else:
result[samp_id] = mapped_id
new_samp_id_counts[mapped_id] += 1
return result
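# Illustrative sketch (editorial addition; sample IDs are made up):
#
#   parse_sample_id_map(['S1\ta\n', 'T1\ta\n'])
#   # -> {'S1': 'a', 'T1': 'a'}
#   # A third original ID mapping to 'a' would raise a ValueError.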
def parse_items(fp):
"""Parse items from a file where each item is in a different line
Parameters
----------
fp : str/bytes/unicode string or file-like
Filepath or file-like object to parse.
Returns
-------
list
List of the items parsed from the file
"""
with open_file(fp, 'U') as f:
items = f.read().strip('\n').split('\n')
if items == ['']:
items = []
return items
| gpl-2.0 | -6,065,542,379,703,306,000 | 33.281217 | 95 | 0.575084 | false |
bameda/python-taiga | tests/test_milestones.py | 4 | 3121 | import datetime
from taiga.requestmaker import RequestMaker
from taiga.models import UserStory, Milestones
from taiga import TaigaAPI
import unittest
from mock import patch
from .tools import create_mock_json
from .tools import MockResponse
class TestMilestones(unittest.TestCase):
@patch('taiga.requestmaker.RequestMaker.get')
def test_single_milestone_parsing(self, mock_requestmaker_get):
mock_requestmaker_get.return_value = MockResponse(200,
create_mock_json('tests/resources/milestone_details_success.json'))
api = TaigaAPI(token='f4k3')
milestone = api.milestones.get(1)
self.assertEqual(milestone.name, 'MILESTONE 1')
self.assertTrue(isinstance(milestone.user_stories[0], UserStory))
@patch('taiga.requestmaker.RequestMaker.get')
def test_list_milestones_parsing(self, mock_requestmaker_get):
mock_requestmaker_get.return_value = MockResponse(200,
create_mock_json('tests/resources/milestones_list_success.json'))
api = TaigaAPI(token='f4k3')
milestones = api.milestones.list()
self.assertEqual(milestones[0].name, 'MILESTONE 1')
self.assertTrue(isinstance(milestones[0].user_stories[0], UserStory))
@patch('taiga.requestmaker.RequestMaker.post')
def test_milestone_create(self, mock_requestmaker_post):
api = TaigaAPI(token='f4k3')
start_time = datetime.datetime(2015, 1, 16, 0, 0)
finish_time = datetime.datetime(2015, 2, 16, 0, 0)
api.milestones.create(1, 'Sprint Jan', start_time, finish_time)
mock_requestmaker_post.assert_called_with('milestones',
payload={'project': 1, 'estimated_finish': '2015-02-16',
'estimated_start': '2015-01-16', 'name': 'Sprint Jan'})
@patch('taiga.requestmaker.RequestMaker.post')
def test_milestone_import(self, mock_requestmaker_post):
rm = RequestMaker('/api/v1', 'fakehost', 'faketoken')
start_time = datetime.datetime(2015, 1, 16, 0, 0)
finish_time = datetime.datetime(2015, 2, 16, 0, 0)
milestone = Milestones(rm).import_(1, 'Sprint Jan', start_time, finish_time)
mock_requestmaker_post.assert_called_with(
'/{endpoint}/{id}/{type}', endpoint='importer', payload={'project': 1,
'name': 'Sprint Jan',
'estimated_start': '2015-01-16',
'estimated_finish': '2015-02-16'},
id=1, type='milestone'
)
@patch('taiga.requestmaker.RequestMaker.get')
def test_stats(self, mock_requestmaker_get):
mock_requestmaker_get.return_value = MockResponse(200,
create_mock_json('tests/resources/milestone_details_success.json'))
api = TaigaAPI(token='f4k3')
milestone = api.milestones.get(1)
milestone.stats()
mock_requestmaker_get.assert_called_with(
'/{endpoint}/{id}/stats',
endpoint='milestones', id=milestone.id
)
| mit | -2,315,816,010,210,566,000 | 47.765625 | 103 | 0.624479 | false |
deepsrijit1105/edx-platform | lms/djangoapps/dashboard/git_import.py | 27 | 11784 | """
Provides a function for importing a git repository into the lms
instance when using a mongo modulestore
"""
import os
import re
import StringIO
import subprocess
import logging
from django.conf import settings
from django.core import management
from django.core.management.base import CommandError
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
import mongoengine
from dashboard.models import CourseImportLog
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
log = logging.getLogger(__name__)
DEFAULT_GIT_REPO_DIR = '/edx/var/app/edxapp/course_repos'
class GitImportError(Exception):
"""
Exception class for handling the typical errors in a git import.
"""
MESSAGE = None
def __init__(self, message=None):
if message is None:
message = self.message
super(GitImportError, self).__init__(message)
class GitImportErrorNoDir(GitImportError):
"""
GitImportError when no directory exists at the specified path.
"""
def __init__(self, repo_dir):
super(GitImportErrorNoDir, self).__init__(
_(
"Path {0} doesn't exist, please create it, "
"or configure a different path with "
"GIT_REPO_DIR"
).format(repo_dir)
)
class GitImportErrorUrlBad(GitImportError):
"""
GitImportError when the git url provided wasn't usable.
"""
MESSAGE = _(
'Non usable git url provided. Expecting something like:'
' [email protected]:mitocw/edx4edx_lite.git'
)
class GitImportErrorBadRepo(GitImportError):
"""
GitImportError when the cloned repository was malformed.
"""
MESSAGE = _('Unable to get git log')
class GitImportErrorCannotPull(GitImportError):
"""
GitImportError when the clone of the repository failed.
"""
MESSAGE = _('git clone or pull failed!')
class GitImportErrorXmlImportFailed(GitImportError):
"""
GitImportError when the course import command failed.
"""
MESSAGE = _('Unable to run import command.')
class GitImportErrorUnsupportedStore(GitImportError):
"""
GitImportError when the modulestore doesn't support imports.
"""
MESSAGE = _('The underlying module store does not support import.')
class GitImportErrorRemoteBranchMissing(GitImportError):
"""
GitImportError when the remote branch doesn't exist.
"""
# Translators: This is an error message when they ask for a
# particular version of a git repository and that version isn't
# available from the remote source they specified
MESSAGE = _('The specified remote branch is not available.')
class GitImportErrorCannotBranch(GitImportError):
"""
GitImportError when the local branch doesn't exist.
"""
# Translators: Error message shown when they have asked for a git
# repository branch, a specific version within a repository, that
# doesn't exist, or there is a problem changing to it.
MESSAGE = _('Unable to switch to specified branch. Please check your branch name.')
def cmd_log(cmd, cwd):
"""
Helper function to redirect stderr to stdout and log the command
used along with the output. Will raise subprocess.CalledProcessError if
command doesn't return 0, and returns the command's output.
"""
output = subprocess.check_output(cmd, cwd=cwd, stderr=subprocess.STDOUT)
log.debug(u'Command was: %r. Working directory was: %r', ' '.join(cmd), cwd)
log.debug(u'Command output was: %r', output)
return output
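# Illustrative sketch (editorial addition; the working directory is hypothetical):
#
#   out = cmd_log(['git', 'status'], '/edx/var/app/edxapp/course_repos/myrepo')
#   # Raises subprocess.CalledProcessError if git exits non-zero.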
def switch_branch(branch, rdir):
"""
This will determine how to change the branch of the repo, and then
use the appropriate git commands to do so.
Raises an appropriate GitImportError exception if there are any issues with changing
branches.
"""
# Get the latest remote
try:
cmd_log(['git', 'fetch', ], rdir)
except subprocess.CalledProcessError as ex:
log.exception('Unable to fetch remote: %r', ex.output)
raise GitImportErrorCannotBranch()
# Check if the branch is available from the remote.
cmd = ['git', 'ls-remote', 'origin', '-h', 'refs/heads/{0}'.format(branch), ]
try:
output = cmd_log(cmd, rdir)
except subprocess.CalledProcessError as ex:
log.exception('Getting a list of remote branches failed: %r', ex.output)
raise GitImportErrorCannotBranch()
if branch not in output:
raise GitImportErrorRemoteBranchMissing()
# Check if the remote branch has already been made locally
cmd = ['git', 'branch', '-a', ]
try:
output = cmd_log(cmd, rdir)
except subprocess.CalledProcessError as ex:
log.exception('Getting a list of local branches failed: %r', ex.output)
raise GitImportErrorCannotBranch()
branches = []
for line in output.split('\n'):
branches.append(line.replace('*', '').strip())
if branch not in branches:
# Checkout with -b since it is remote only
cmd = ['git', 'checkout', '--force', '--track',
'-b', branch, 'origin/{0}'.format(branch), ]
try:
cmd_log(cmd, rdir)
except subprocess.CalledProcessError as ex:
log.exception('Unable to checkout remote branch: %r', ex.output)
raise GitImportErrorCannotBranch()
# Go ahead and reset hard to the newest version of the branch now that we know
# it is local.
try:
cmd_log(['git', 'reset', '--hard', 'origin/{0}'.format(branch), ], rdir)
except subprocess.CalledProcessError as ex:
log.exception('Unable to reset to branch: %r', ex.output)
raise GitImportErrorCannotBranch()
def add_repo(repo, rdir_in, branch=None):
"""
This will add a git repo into the mongo modulestore.
If branch is left as None, it will fetch the most recent
version of the current branch.
"""
# pylint: disable=too-many-statements
git_repo_dir = getattr(settings, 'GIT_REPO_DIR', DEFAULT_GIT_REPO_DIR)
git_import_static = getattr(settings, 'GIT_IMPORT_STATIC', True)
# Set defaults even if it isn't defined in settings
mongo_db = {
'host': 'localhost',
'port': 27017,
'user': '',
'password': '',
'db': 'xlog',
}
# Allow overrides
if hasattr(settings, 'MONGODB_LOG'):
for config_item in ['host', 'user', 'password', 'db', 'port']:
mongo_db[config_item] = settings.MONGODB_LOG.get(
config_item, mongo_db[config_item])
if not os.path.isdir(git_repo_dir):
raise GitImportErrorNoDir(git_repo_dir)
# pull from git
if not (repo.endswith('.git') or
repo.startswith(('http:', 'https:', 'git:', 'file:'))):
raise GitImportErrorUrlBad()
if rdir_in:
rdir = os.path.basename(rdir_in)
else:
rdir = repo.rsplit('/', 1)[-1].rsplit('.git', 1)[0]
log.debug('rdir = %s', rdir)
rdirp = '{0}/{1}'.format(git_repo_dir, rdir)
if os.path.exists(rdirp):
log.info('directory already exists, doing a git pull instead '
'of git clone')
cmd = ['git', 'pull', ]
cwd = rdirp
else:
cmd = ['git', 'clone', repo, ]
cwd = git_repo_dir
cwd = os.path.abspath(cwd)
try:
ret_git = cmd_log(cmd, cwd=cwd)
except subprocess.CalledProcessError as ex:
log.exception('Error running git pull: %r', ex.output)
raise GitImportErrorCannotPull()
if branch:
switch_branch(branch, rdirp)
# get commit id
cmd = ['git', 'log', '-1', '--format=%H', ]
try:
commit_id = cmd_log(cmd, cwd=rdirp)
except subprocess.CalledProcessError as ex:
log.exception('Unable to get git log: %r', ex.output)
raise GitImportErrorBadRepo()
ret_git += '\nCommit ID: {0}'.format(commit_id)
# get branch
cmd = ['git', 'symbolic-ref', '--short', 'HEAD', ]
try:
branch = cmd_log(cmd, cwd=rdirp)
except subprocess.CalledProcessError as ex:
# I can't discover a way to exercise this, but git is complex
# so still logging and raising here in case.
log.exception('Unable to determine branch: %r', ex.output)
raise GitImportErrorBadRepo()
ret_git += '{0}Branch: {1}'.format(' \n', branch)
# Get XML logging logger and capture debug to parse results
output = StringIO.StringIO()
import_log_handler = logging.StreamHandler(output)
import_log_handler.setLevel(logging.DEBUG)
logger_names = ['xmodule.modulestore.xml_importer', 'git_add_course',
'xmodule.modulestore.xml', 'xmodule.seq_module', ]
loggers = []
for logger_name in logger_names:
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
logger.addHandler(import_log_handler)
loggers.append(logger)
try:
management.call_command('import', git_repo_dir, rdir,
nostatic=not git_import_static)
except CommandError:
raise GitImportErrorXmlImportFailed()
except NotImplementedError:
raise GitImportErrorUnsupportedStore()
ret_import = output.getvalue()
# Remove handler hijacks
for logger in loggers:
logger.setLevel(logging.NOTSET)
logger.removeHandler(import_log_handler)
course_key = None
location = 'unknown'
# extract course ID from output of import-command-run and make symlink
# this is needed in order for custom course scripts to work
match = re.search(r'(?ms)===> IMPORTING courselike (\S+)', ret_import)
if match:
course_id = match.group(1)
try:
course_key = CourseKey.from_string(course_id)
except InvalidKeyError:
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
cdir = '{0}/{1}'.format(git_repo_dir, course_key.course)
log.debug('Studio course dir = %s', cdir)
if os.path.exists(cdir) and not os.path.islink(cdir):
log.debug(' -> exists, but is not symlink')
log.debug(subprocess.check_output(['ls', '-l', ],
cwd=os.path.abspath(cdir)))
try:
os.rmdir(os.path.abspath(cdir))
except OSError:
log.exception('Failed to remove course directory')
if not os.path.exists(cdir):
log.debug(' -> creating symlink between %s and %s', rdirp, cdir)
try:
os.symlink(os.path.abspath(rdirp), os.path.abspath(cdir))
except OSError:
log.exception('Unable to create course symlink')
log.debug(subprocess.check_output(['ls', '-l', ],
cwd=os.path.abspath(cdir)))
# store import-command-run output in mongo
mongouri = 'mongodb://{user}:{password}@{host}:{port}/{db}'.format(**mongo_db)
try:
if mongo_db['user'] and mongo_db['password']:
mdb = mongoengine.connect(mongo_db['db'], host=mongouri)
else:
mdb = mongoengine.connect(mongo_db['db'], host=mongo_db['host'], port=mongo_db['port'])
except mongoengine.connection.ConnectionError:
log.exception('Unable to connect to mongodb to save log, please '
'check MONGODB_LOG settings')
cil = CourseImportLog(
course_id=course_key,
location=location,
repo_dir=rdir,
created=timezone.now(),
import_log=ret_import,
git_log=ret_git,
)
cil.save()
log.debug('saved CourseImportLog for %s', cil.course_id)
mdb.disconnect()
| agpl-3.0 | -2,640,303,271,180,701,000 | 33.156522 | 99 | 0.631704 | false |
Xetlin/Lua | Bindings/Scripts/create_lua_library/create_lua_library.py | 10 | 41567 | import sys
import CppHeaderParser
import os
import errno
import re
from zipfile import *
import fnmatch
import re
def mkdir_p(path): # Same effect as mkdir -p, create dir and all necessary parent dirs
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST: # Dir already exists; not really an error
pass
else: raise
def template_returnPtrLookupArray(prefix, className, ptr):
out = "%sif %s == nil then return nil end\n" % (prefix, ptr)
out += "%sfor i=1,count(%s) do\n" % (prefix, ptr)
out += "%s\tlocal __c = _G[%s](\"__skip_ptr__\")\n" % (prefix, className.replace("*", ""))
out += "%s\t__c.__ptr = %s[i]\n" % (prefix, ptr)
out += "%s\t%s[i] = __c\n" % (prefix, ptr)
out += "%send\n" % (prefix)
out += "%sreturn %s\n" % (prefix,ptr)
return out
# Note we expect className to be a valid string
def template_returnPtrLookup(prefix, className, ptr):
out = "%sif %s == nil then return nil end\n" % (prefix, ptr)
out += "%slocal __c = _G[%s](\"__skip_ptr__\")\n" % (prefix, className.replace("*", ""))
out += "%s__c.__ptr = %s\n" % (prefix, ptr)
out += "%sreturn __c\n" % (prefix)
return out
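# Illustrative sketch (editorial addition): for a class named Vector3 this emits
# Lua along these lines (the class name is hypothetical):
#
#   template_returnPtrLookup("\t", template_quote("Vector3"), "retVal")
#   # ->  if retVal == nil then return nil end
#   #     local __c = _G["Vector3"]("__skip_ptr__")
#   #     __c.__ptr = retVal
#   #     return __c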
def template_quote(str):
return "\"%s\"" % str;
def cleanDocs(docs):
return docs.replace("/*", "").replace("*/", "").replace("*", "").replace("\n", "").replace("\r", "").replace("::", ".").replace("\t", "")
def toLuaType(t):
return t.replace("void", "nil").replace("int", "Integer").replace("bool", "Boolean").replace("*", "")
# FIXME: Some "unsigned int *" functions are still being generated on the polycode API?
def typeFilter(ty):
ty = ty.replace("Polycode::", "")
ty = ty.replace("std::", "")
ty = ty.replace("const", "")
ty = ty.replace("inline", "")
ty = ty.replace("static", "")
ty = ty.replace("virtual", "")
ty = ty.replace("&", "")
ty = re.sub(r'^.*\sint\s*$', 'int', ty) # eg "unsigned int"
ty = re.sub(r'^.*\schar\s*$', 'char', ty) # eg "unsigned int"
ty = re.sub(r'^.*\slong\s*$', 'int', ty)
ty = re.sub(r'^.*\swchar_t\s*$', 'int', ty)
ty = re.sub(r'^.*\sshort\s*$', 'int', ty)
ty = re.sub(r'^.*\sfloat\s*$', 'Number', ty)
ty = re.sub(r'^.*\sdouble\s*$', 'Number', ty) # eg "long double"
ty = ty.replace("unsigned", "int")
ty = ty.replace("long", "int")
ty = ty.replace("float", "Number")
ty = ty.replace("double", "Number")
ty = ty.replace(" ", "") # Not very safe!
return ty
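# Illustrative sketch (editorial addition) of how C++ types are mapped:
#
#   typeFilter("const unsigned int &")  # -> "int"
#   typeFilter("Polycode::String")      # -> "String"
#   typeFilter("float")                 # -> "Number"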
def createLUABindings(inputPath, prefix, mainInclude, libSmallName, libName, apiPath, apiClassPath, includePath, sourcePath, luaDocPath, inheritInModuleFiles):
wrappersHeaderOut = "" # Def: Global C++ *LUAWrappers.h
cppRegisterOut = "" # Def: Global C++ *LUA.cpp
cppLoaderOut = "" # Def: Global C++ *LUA.cpp
luaDocOut = ""
luaIndexOut = "" # Def: Global Lua everything-gets-required-from-this-file file
# Header boilerplate for wrappersHeaderOut and cppRegisterOut
cppRegisterOut += "#include \"%sLUA.h\"\n" % (prefix)
cppRegisterOut += "#include \"%sLUAWrappers.h\"\n" % (prefix)
cppRegisterOut += "#include \"PolyCoreServices.h\"\n\n"
cppRegisterOut += "using namespace Polycode;\n\n"
cppRegisterOut += "int luaopen_%s(lua_State *L) {\n" % (prefix)
if prefix != "Polycode" and prefix != "Physics2D" and prefix != "Physics3D" and prefix != "UI":
cppRegisterOut += "CoreServices *inst = (CoreServices*) *((PolyBase**)lua_touserdata(L, 1));\n"
cppRegisterOut += "CoreServices::setInstance(inst);\n"
cppRegisterOut += "\tstatic const struct luaL_reg %sLib [] = {" % (libSmallName)
wrappersHeaderOut += "#pragma once\n\n"
wrappersHeaderOut += "extern \"C\" {\n\n"
wrappersHeaderOut += "#include <stdio.h>\n"
wrappersHeaderOut += "#include \"lua.h\"\n"
wrappersHeaderOut += "#include \"lualib.h\"\n"
wrappersHeaderOut += "#include \"lauxlib.h\"\n"
wrappersHeaderOut += "#undef near\n"
wrappersHeaderOut += "#undef far\n"
wrappersHeaderOut += "} // extern \"C\" \n\n"
luaDocOut += "<?xml version=\"1.0\" ?>\n"
luaDocOut += "<docs>\n"
luaDocOut += "<classes>\n"
# Get list of headers to create bindings from
inputPathIsDir = os.path.isdir(inputPath)
if inputPathIsDir:
files = os.listdir(inputPath)
else:
files = []
with open(inputPath) as f:
for line in f.readlines():
files.append(line.strip()) # Strip whitespace, path/
filteredFiles = []
for fileName in files:
if inputPathIsDir:
fileName = "%s/%s" % (inputPath, fileName)
if os.path.isdir(fileName):
continue
head, tail = os.path.split(fileName)
ignore = ["PolyTween", "PolyTweenManager", "PolyGLSLProgram", "PolyGLSLShader", "PolyGLSLShaderModule", "PolyWinCore", "PolyCocoaCore", "PolyAGLCore", "PolySDLCore", "Poly_iPhone", "PolyGLES1Renderer", "PolyGLRenderer", "tinyxml", "tinystr", "OpenGLCubemap", "PolyiPhoneCore", "PolyGLES1Texture", "PolyGLTexture", "PolyGLVertexBuffer", "PolyThreaded", "PolyGLHeaders", "GLee", "PolyPeer", "PolySocket", "PolyClient", "PolyServer", "PolyServerWorld", "OSFILE", "OSFileEntry", "OSBasics", "PolyLogger", "PolyFontGlyphSheet"]
if tail.split(".")[1] == "h" and tail.split(".")[0] not in ignore:
filteredFiles.append(fileName)
wrappersHeaderOut += "#include \"%s\"\n" % (tail)
wrappersHeaderOut += "\nusing namespace std;\n\n"
wrappersHeaderOut += "\nnamespace Polycode {\n\n"
# list of classes that don't get the garbage collection in their meta table
disable_gc = ["Entity","SceneLabel", "SceneMesh", "Scene", "Texture", "Image", "Camera", "SceneParticleEmitter", "Mesh", "Vertex", "Polygon", "Polycode::Polygon", "Material", "ScenePrimitive", "SceneLine", "SceneLight", "SceneSound", "SceneImage", "SceneEntity", "SceneEntityInstance", "SceneSprite"]
# Special case: If we are building the Polycode library itself, inject the LuaEventHandler class.
# Note: so that event callbacks can work, any object inheriting from EventHandler will secretly
# be modified to actually inherit from LuaEventHandler instead.
if prefix == "Polycode":
wrappersHeaderOut += "class LuaEventHandler : public EventHandler {\n"
wrappersHeaderOut += "public:\n"
wrappersHeaderOut += " LuaEventHandler() : EventHandler() {}\n"
wrappersHeaderOut += " void handleEvent(Event *e) {\n"
wrappersHeaderOut += " lua_getfield (L, LUA_GLOBALSINDEX, \"__customError\");\n"
wrappersHeaderOut += " int errH = lua_gettop(L);\n"
wrappersHeaderOut += " lua_getfield(L, LUA_GLOBALSINDEX, \"__handleEvent\");\n"
wrappersHeaderOut += " lua_rawgeti( L, LUA_REGISTRYINDEX, wrapperIndex );\n"
wrappersHeaderOut += " PolyBase **userdataPtr = (PolyBase**)lua_newuserdata(L, sizeof(PolyBase*));\n"
wrappersHeaderOut += " *userdataPtr = (PolyBase*)e;\n"
wrappersHeaderOut += " lua_pcall(L, 2, 0, errH);\n"
wrappersHeaderOut += " lua_settop(L, 0);\n"
wrappersHeaderOut += " }\n"
wrappersHeaderOut += " int wrapperIndex;\n"
wrappersHeaderOut += " lua_State *L;\n"
wrappersHeaderOut += "};\n\n"
# Iterate, process each input file
for fileName in filteredFiles:
# "Package owned" classes that ship with Polycode
inheritInModule = ["PhysicsGenericConstraint", "PhysicsHingeConstraint", "PhysicsPointToPointConstraint", "PhysicsConstraint", "PhysicsEntity", "CollisionScene", "CollisionEntity", "UIElement", "UIWindow", "UIMenuItem", "UIImage", "UIRect"]
# A file or comma-separated list of files can be given to specify classes which are "package owned"
# and should not be inherited out of Polycode/. The files should contain one class name per line,
# and the class name may be prefixed with a path (which will be ignored).
if inheritInModuleFiles:
for moduleFileName in inheritInModuleFiles.split(","):
with open(moduleFileName) as f:
for line in f.readlines():
inheritInModule.append(line.strip().split("/",1)[-1]) # Strip whitespace, path/
print("Parsing %s" % fileName)
try: # One input file parse.
f = open(fileName) # Def: Input file handle
contents = f.read().replace("_PolyExport", "") # Def: Input file contents, strip out "_PolyExport"
cppHeader = CppHeaderParser.CppHeader(contents, "string") # Def: Input file contents, parsed structure
ignore_classes = ["PolycodeShaderModule", "Object", "Threaded", "OpenGLCubemap", "PolyBase", "Matrix4::union "]
# Iterate, check each class in this file.
for ckey in cppHeader.classes:
print(">> Parsing class %s" % ckey)
c = cppHeader.classes[ckey] # Def: The class structure
luaClassBindingOut = "" # Def: The local lua file to generate for this class.
inherits = False
parentClass = ""
if len(c["inherits"]) > 0: # Does this class have parents?
if c["inherits"][0]["class"] not in ignore_classes:
if c["inherits"][0]["class"] in inheritInModule: # Parent class is in this module
luaClassBindingOut += "require \"%s/%s\"\n\n" % (prefix, c["inherits"][0]["class"])
else: # Parent class is in Polycore
luaClassBindingOut += "require \"Polycode/%s\"\n\n" % (c["inherits"][0]["class"])
luaClassBindingOut += "class \"%s\" (%s)\n\n" % (ckey, c["inherits"][0]["class"])
parentClass = c["inherits"][0]["class"]
inherits = True
if inherits == False: # Class does not have parents
luaClassBindingOut += "class \"%s\"\n\n" % ckey
if ckey in ignore_classes:
print("INGORING class %s" % ckey)
continue
#if len(c["methods"]["public"]) < 2: # Used to, this was a continue.
# print("Warning: Lua-binding class with less than two methods")
# continue # FIXME: Remove this, move any non-compileable classes into ignore_classes
extendString = ""
if len(c["inherits"]) > 0:
if c["inherits"][0]["class"] != "PolyBase":
extendString = " extends=\"%s\"" % (c["inherits"][0]["class"])
luaDocOut += "\t<class name=\"%s\"%s>\n" % (ckey, extendString)
if 'doxygen' in c:
luaDocOut += "\t\t<desc><![CDATA[%s]]></desc>\n" % (cleanDocs(c['doxygen']))
if ckey in disable_gc:
luaDocOut += "\t\t<class_notes>NOTE: %s instances are not automatically garbage collected.</class_notes>\n" % (ckey)
parsed_methods = [] # Def: List of discovered methods
ignore_methods = ["readByte32", "readByte16", "getCustomEntitiesByType", "Core", "Renderer", "Shader", "Texture", "handleEvent", "secondaryHandler", "getSTLString", "readInt"]
luaClassBindingOut += "\n\n"
luaDocOut += "\t\t<static_members>\n"
classProperties = [] # Def: List of found property structures ("properties" meaning "data members")
for pp in c["properties"]["public"]:
pp["type"] = pp["type"].replace("Polycode::", "")
pp["type"] = pp["type"].replace("std::", "")
if pp["type"].find("POLYIGNORE") != -1:
continue
if pp["type"].find("static ") != -1: # If static. FIXME: Static doesn't work?
if "defaltValue" in pp: # FIXME: defaltValue is misspelled.
defaltValue = pp["defaltValue"]
# The "Default Value" is more or less a literal C++ string. This causes a problem:
# Frequently we say static const int A = 1; static const int B = A + 1.
# Put in a one-off hack to ensure namespacing works in this special case.
if re.match(r'\s*[a-zA-Z_][a-zA-Z0-9_]*\s*\+', defaltValue):
defaltValue = "%s.%s" % (ckey, defaltValue)
luaClassBindingOut += "%s.%s = %s\n" % (ckey, pp["name"], defaltValue)
luaDocOut += "\t\t\t<static_member name=\"%s\" type=\"%s\" value=\"%s\">\n" % (pp["name"], toLuaType(typeFilter(pp["type"])), pp["defaltValue"])
if 'doxygen' in pp:
luaDocOut += "\t\t\t\t<desc><![CDATA[%s]]></desc>\n" % (cleanDocs(pp['doxygen']))
luaDocOut += "\t\t\t</static_member>\n"
else: # FIXME: Nonstatic method ? variable ?? found.
#there are some bugs in the class parser that cause it to return junk
if pp["type"].find("vector") == -1 and pp["name"] != "setScale" and pp["name"] != "setPosition" and pp["name"] != "BUFFER_CACHE_PRECISION" and not pp["name"].isdigit():
classProperties.append(pp)
luaDocOut += "\t\t</static_members>\n"
# Iterate over properties, creating getters
pidx = 0 # Def: Count of properties processed so far
# TODO: Remove or generalize ParticleEmitter special casing. These lines are marked with #SPEC
luaDocOut += "\t\t<members>\n"
numGetVars = 0
if len(classProperties) > 0: # If there are properties, add index lookup to the metatable
luaClassBindingOut += "function %s:__getvar(name)\n" % ckey
# Iterate over property structures, creating if/else clauses for each.
# TODO: Could a table be more appropriate for this lookup?
for pp in classProperties:
if pp["name"] == "" or pp["array"] == 1:
continue
numGetVars = numGetVars + 1
pp["type"] = typeFilter(pp["type"])
if pidx == 0:
luaClassBindingOut += "\tif name == \"%s\" then\n" % (pp["name"])
else:
luaClassBindingOut += "\telseif name == \"%s\" then\n" % (pp["name"])
# Generate Lua side of binding:
# If type is a primitive such as Number/String/int/bool
if pp["type"] == "PolyKEY" or pp["type"] == "Number" or pp["type"] == "String" or pp["type"] == "int" or pp["type"] == "bool":
luaClassBindingOut += "\t\treturn %s.%s_get_%s(self.__ptr)\n" % (libName, ckey, pp["name"])
# If type is a particle emitter, specifically #SPEC
elif (ckey == "ScreenParticleEmitter" or ckey == "SceneParticleEmitter") and pp["name"] == "emitter":
luaClassBindingOut += "\t\tlocal ret = %s(\"__skip_ptr__\")\n" % (pp["type"])
luaClassBindingOut += "\t\tret.__ptr = self.__ptr\n"
luaClassBindingOut += "\t\treturn ret\n"
# If type is a class
else:
luaClassBindingOut += "\t\tlocal retVal = %s.%s_get_%s(self.__ptr)\n" % (libName, ckey, pp["name"])
luaClassBindingOut += template_returnPtrLookup("\t\t", template_quote(pp["type"]), "retVal")
luaDocOut += "\t\t\t<member name=\"%s\" type=\"%s\">\n" % (pp["name"], toLuaType(typeFilter(pp["type"])))
if 'doxygen' in pp:
luaDocOut += "\t\t\t\t<desc><![CDATA[%s]]></desc>\n" % (cleanDocs(pp['doxygen']))
luaDocOut += "\t\t\t</member>\n"
# Generate C++ side of binding:
if not ((ckey == "ScreenParticleEmitter" or ckey == "SceneParticleEmitter") and pp["name"] == "emitter"): #SPEC
cppRegisterOut += "\t\t{\"%s_get_%s\", %s_%s_get_%s},\n" % (ckey, pp["name"], libName, ckey, pp["name"])
wrappersHeaderOut += "static int %s_%s_get_%s(lua_State *L) {\n" % (libName, ckey, pp["name"])
wrappersHeaderOut += "\tluaL_checktype(L, 1, LUA_TUSERDATA);\n"
wrappersHeaderOut += "\t%s *inst = (%s*) *((PolyBase**)lua_touserdata(L, 1));\n" % (ckey, ckey)
outfunc = "this_shouldnt_happen"
retFunc = ""
if pp["type"] == "Number":
outfunc = "lua_pushnumber"
if pp["type"] == "String":
outfunc = "lua_pushstring"
retFunc = ".c_str()"
if pp["type"] == "int" or pp["type"] == "PolyKEY":
outfunc = "lua_pushinteger"
if pp["type"] == "bool":
outfunc = "lua_pushboolean"
if pp["type"] == "Number" or pp["type"] == "String" or pp["type"] == "int" or pp["type"] == "bool" or pp["type"] == "PolyKEY":
wrappersHeaderOut += "\t%s(L, inst->%s%s);\n" % (outfunc, pp["name"], retFunc)
else:
if pp["type"].find("*") != -1:
wrappersHeaderOut += "\tif(!inst->%s%s) {\n" % (pp["name"], retFunc)
wrappersHeaderOut += "\t\tlua_pushnil(L);\n"
wrappersHeaderOut += "\t} else {\n"
wrappersHeaderOut += "\t\tPolyBase **userdataPtr = (PolyBase**)lua_newuserdata(L, sizeof(PolyBase*));\n"
wrappersHeaderOut += "\t\t*userdataPtr = (PolyBase*)inst->%s%s;\n" % (pp["name"], retFunc)
wrappersHeaderOut += "\t}\n"
else:
wrappersHeaderOut += "\tPolyBase **userdataPtr = (PolyBase**)lua_newuserdata(L, sizeof(PolyBase*));\n"
wrappersHeaderOut += "\t*userdataPtr = (PolyBase*)&inst->%s%s;\n" % (pp["name"], retFunc)
wrappersHeaderOut += "\treturn 1;\n"
wrappersHeaderOut += "}\n\n"
# Success
pidx = pidx + 1
if numGetVars != 0:
luaClassBindingOut += "\tend\n"
if inherits:
luaClassBindingOut += "\tif %s[\"__getvar\"] ~= nil then\n" % (parentClass)
luaClassBindingOut += "\t\treturn %s.__getvar(self, name)\n" % (parentClass)
luaClassBindingOut += "\tend\n"
luaClassBindingOut += "end\n"
luaDocOut += "\t\t</members>\n"
luaClassBindingOut += "\n\n"
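				# For orientation (illustrative only, not generated verbatim): for a hypothetical
				# class "Foo" with a Number member "x", a library prefix "Polycore" and parent
				# class "Entity", the Lua accessor emitted above is shaped roughly like:
				#
				#   function Foo:__getvar(name)
				#       if name == "x" then
				#           return Polycore.Foo_get_x(self.__ptr)
				#       end
				#       if Entity["__getvar"] ~= nil then
				#           return Entity.__getvar(self, name)
				#       end
				#   end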
# Iterate over properties again, creating setters
				pidx = 0 # Def: Count of properties processed so far
if len(classProperties) > 0: # If there are properties, add index setter to the metatable
luaClassBindingOut += "function %s:__setvar(name,value)\n" % ckey
for pp in classProperties:
if pp["name"] == "" or pp["array"] == 1:
continue
pp["type"] = typeFilter(pp["type"])
# If type is a primitive: Create lua and C++ sides at the same time.
if pp["type"] == "Number" or pp["type"] == "String" or pp["type"] == "int" or pp["type"] == "bool" or pp["type"] == "PolyKEY":
if pidx == 0:
luaClassBindingOut += "\tif name == \"%s\" then\n" % (pp["name"])
else:
luaClassBindingOut += "\telseif name == \"%s\" then\n" % (pp["name"])
luaClassBindingOut += "\t\t%s.%s_set_%s(self.__ptr, value)\n" % (libName, ckey, pp["name"])
luaClassBindingOut += "\t\treturn true\n"
cppRegisterOut += "\t\t{\"%s_set_%s\", %s_%s_set_%s},\n" % (ckey, pp["name"], libName, ckey, pp["name"])
wrappersHeaderOut += "static int %s_%s_set_%s(lua_State *L) {\n" % (libName, ckey, pp["name"])
wrappersHeaderOut += "\tluaL_checktype(L, 1, LUA_TUSERDATA);\n"
wrappersHeaderOut += "\t%s *inst = (%s*) *((PolyBase**)lua_touserdata(L, 1));\n" % (ckey, ckey)
outfunc = "this_shouldnt_happen"
outfuncsuffix = ""
if pp["type"] == "Number":
outfunc = "lua_tonumber"
if pp["type"] == "String":
outfunc = "lua_tostring"
if pp["type"] == "int":
outfunc = "lua_tointeger"
if pp["type"] == "PolyKEY":
outfunc = "(PolyKEY)lua_tointeger"
if pp["type"] == "bool":
outfunc = "lua_toboolean"
outfuncsuffix = " != 0"
wrappersHeaderOut += "\t%s param = %s(L, 2)%s;\n" % (pp["type"], outfunc, outfuncsuffix)
wrappersHeaderOut += "\tinst->%s = param;\n" % (pp["name"])
wrappersHeaderOut += "\treturn 0;\n"
wrappersHeaderOut += "}\n\n"
pidx = pidx + 1 # Success
else:
if pp["type"].find("*") == -1 and pp["type"].find("static") == -1:
if pidx == 0:
luaClassBindingOut += "\tif name == \"%s\" then\n" % (pp["name"])
else:
luaClassBindingOut += "\telseif name == \"%s\" then\n" % (pp["name"])
luaClassBindingOut += "\t\t%s.%s_set_%s(self.__ptr, value.__ptr)\n" % (libName, ckey, pp["name"])
luaClassBindingOut += "\t\treturn true\n"
cppRegisterOut += "\t\t{\"%s_set_%s\", %s_%s_set_%s},\n" % (ckey, pp["name"], libName, ckey, pp["name"])
wrappersHeaderOut += "static int %s_%s_set_%s(lua_State *L) {\n" % (libName, ckey, pp["name"])
wrappersHeaderOut += "\tluaL_checktype(L, 1, LUA_TUSERDATA);\n"
wrappersHeaderOut += "\t%s *inst = (%s*) *((PolyBase**)lua_touserdata(L, 1));\n" % (ckey, ckey)
wrappersHeaderOut += "\tluaL_checktype(L, 2, LUA_TUSERDATA);\n"
wrappersHeaderOut += "\t%s *argInst = (%s*) *((PolyBase**)lua_touserdata(L, 2));\n" % (typeFilter(pp["type"]), typeFilter(pp["type"]))
wrappersHeaderOut += "\tinst->%s = *argInst;\n" % (pp["name"])
wrappersHeaderOut += "\treturn 0;\n"
wrappersHeaderOut += "}\n\n"
pidx = pidx + 1 # Success
					# Notice: Setters are not created for pointer-typed or static members.
if pidx != 0:
luaClassBindingOut += "\tend\n"
if inherits:
luaClassBindingOut += "\tif %s[\"__setvar\"] ~= nil then\n" % (parentClass)
luaClassBindingOut += "\t\treturn %s.__setvar(self, name, value)\n" % (parentClass)
luaClassBindingOut += "\telse\n"
luaClassBindingOut += "\t\treturn false\n"
luaClassBindingOut += "\tend\n"
else:
luaClassBindingOut += "\treturn false\n"
luaClassBindingOut += "end\n"
# Iterate over methods
luaClassBindingOut += "\n\n"
luaDocOut += "\t\t<methods>\n"
for pm in c["methods"]["public"]:
# Skip argument-overloaded methods and operators.
					# TODO: Instead of skipping argument overloads, have special behavior.
# TODO: Instead of skipping operators, add to metatable.
if pm["name"] in parsed_methods or pm["name"].find("operator") > -1 or pm["rtnType"].find("POLYIGNORE") > -1 or pm["name"] in ignore_methods:
continue
					# Skip destructors and the special-cased CoreServices constructor.
					# TODO: Special-case certain kinds of vector<>s?
if pm["name"] == "~"+ckey or pm["name"] == "CoreServices":
continue
staticString = ""
if pm["rtnType"].find("static ") != -1:
staticString = " static=\"true\""
if pm["rtnType"].find("std::vector") > -1:
vectorReturnClass = pm["rtnType"].replace("std::vector<", "").replace(">","").replace(" ", "")
luaDocOut += "\t\t\t<method name=\"%s\" return_array=\"true\" return_type=\"%s\"%s>\n" % (pm["name"], toLuaType(typeFilter(vectorReturnClass).replace("*", "")), staticString)
else:
luaDocOut += "\t\t\t<method name=\"%s\" return_type=\"%s\"%s>\n" % (pm["name"], toLuaType(typeFilter(pm["rtnType"].replace("*", ""))), staticString)
docs = None
if 'doxygen' in pm:
if pm['doxygen'].find("@return") > -1:
docs = cleanDocs(pm['doxygen']).split("@return")[0].split("@param")
else:
docs = cleanDocs(pm['doxygen']).split("@param")
luaDocOut += "\t\t\t\t<desc><![CDATA[%s]]></desc>\n" % (docs[0])
if len(pm["parameters"]) > 0:
luaDocOut += "\t\t\t\t<params>\n"
paramIndex = 0
for param in pm["parameters"]:
if "name" in param:
if not "type" in param:
continue
if param["type"] == "0":
continue
if param["type"].find("vector<") != -1:
vectorClass = param["type"].replace("std::vector<", "").replace(">","").replace(" ", "")
luaDocOut += "\t\t\t\t\t<param name=\"%s\" param_array=\"true\" type=\"%s\">\n" % (param["name"], toLuaType(vectorClass.replace("*","")))
else:
luaDocOut += "\t\t\t\t\t<param name=\"%s\" type=\"%s\">\n" % (param["name"], toLuaType(typeFilter(param["type"]).replace("*","")))
if docs != None:
if len(docs) > paramIndex+1:
cdoc = docs[paramIndex+1].split()
cdoc.pop(0)
luaDocOut += "\t\t\t\t\t\t<desc><![CDATA[%s]]></desc>\n" % (" ".join(cdoc).replace("\n", ""))
luaDocOut += "\t\t\t\t\t</param>\n"
paramIndex = paramIndex + 1
luaDocOut += "\t\t\t\t</params>\n"
luaDocOut += "\t\t\t</method>\n"
basicType = False
voidRet = False
vectorReturn = False
vectorReturnClass = ""
# Def: True if method takes a lua_State* as argument (i.e.: no preprocessing by us)
rawMethod = len(pm["parameters"]) > 0 and pm["parameters"][0].get("type","").find("lua_State") > -1
# Basic setup, C++ side: Add function to registry and start building wrapper function.
if pm["name"] == ckey: # It's a constructor
cppRegisterOut += "\t\t{\"%s\", %s_%s},\n" % (ckey, libName, ckey)
wrappersHeaderOut += "static int %s_%s(lua_State *L) {\n" % (libName, ckey)
idx = 1 # Def: Current stack depth (TODO: Figure out, is this correct?)
else: # It's not a constructor
cppRegisterOut += "\t\t{\"%s_%s\", %s_%s_%s},\n" % (ckey, pm["name"], libName, ckey, pm["name"])
wrappersHeaderOut += "static int %s_%s_%s(lua_State *L) {\n" % (libName, ckey, pm["name"])
						# Non-static methods read the instance pointer from argument 1; static methods start their arguments at index 1. # FIXME: is the static path fully correct?
if pm["rtnType"].find("static ") == -1:
wrappersHeaderOut += "\tluaL_checktype(L, 1, LUA_TUSERDATA);\n"
wrappersHeaderOut += "\t%s *inst = (%s*) *((PolyBase**)lua_touserdata(L, 1));\n" % (ckey, ckey)
idx = 2
else:
idx = 1
if rawMethod:
wrappersHeaderOut += "\treturn inst->%s(L);\n" % (pm["name"])
else:
# Generate C++ side parameter pushing
paramlist = []
lparamlist = []
for param in pm["parameters"]:
if not "type" in param:
continue
if param["type"] == "0":
continue
param["type"] = typeFilter(param["type"])
param["name"] = param["name"].replace("end", "_end").replace("repeat", "_repeat")
if"type" in param:
luatype = "LUA_TUSERDATA"
checkfunc = "lua_isuserdata"
if param["type"].find("*") > -1:
luafunc = "(%s) *((PolyBase**)lua_touserdata" % (param["type"].replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"))
elif param["type"].find("&") > -1:
luafunc = "*(%s*) *((PolyBase**)lua_touserdata" % (param["type"].replace("const", "").replace("&", "").replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"))
else:
luafunc = "*(%s*) *((PolyBase**)lua_touserdata" % (param["type"].replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle"))
lend = ".__ptr"
luafuncsuffix = ")"
if param["type"] == "int" or param["type"] == "unsigned int" or param["type"] == "short":
luafunc = "lua_tointeger"
luatype = "LUA_TNUMBER"
checkfunc = "lua_isnumber"
luafuncsuffix = ""
lend = ""
if param["type"] == "PolyKEY":
luafunc = "(PolyKEY)lua_tointeger"
luatype = "LUA_TNUMBER"
checkfunc = "lua_isnumber"
luafuncsuffix = ""
lend = ""
if param["type"] == "bool":
luafunc = "lua_toboolean"
luatype = "LUA_TBOOLEAN"
checkfunc = "lua_isboolean"
luafuncsuffix = " != 0"
lend = ""
if param["type"] == "Number" or param["type"] == "float" or param["type"] == "double":
luatype = "LUA_TNUMBER"
luafunc = "lua_tonumber"
checkfunc = "lua_isnumber"
luafuncsuffix = ""
lend = ""
if param["type"] == "String":
luatype = "LUA_TSTRING"
luafunc = "lua_tostring"
checkfunc = "lua_isstring"
luafuncsuffix = ""
lend = ""
param["type"] = param["type"].replace("Polygon", "Polycode::Polygon").replace("Rectangle", "Polycode::Rectangle")
if "defaltValue" in param:
if checkfunc != "lua_isuserdata" or (checkfunc == "lua_isuserdata" and param["defaltValue"] == "NULL"):
#param["defaltValue"] = param["defaltValue"].replace(" 0f", ".0f")
param["defaltValue"] = param["defaltValue"].replace(": :", "::")
#param["defaltValue"] = param["defaltValue"].replace("0 ", "0.")
param["defaltValue"] = re.sub(r'([0-9]+) ([0-9])+', r'\1.\2', param["defaltValue"])
wrappersHeaderOut += "\t%s %s;\n" % (param["type"], param["name"])
wrappersHeaderOut += "\tif(%s(L, %d)) {\n" % (checkfunc, idx)
wrappersHeaderOut += "\t\t%s = %s(L, %d)%s;\n" % (param["name"], luafunc, idx, luafuncsuffix)
wrappersHeaderOut += "\t} else {\n"
wrappersHeaderOut += "\t\t%s = %s;\n" % (param["name"], param["defaltValue"])
wrappersHeaderOut += "\t}\n"
else:
wrappersHeaderOut += "\tluaL_checktype(L, %d, %s);\n" % (idx, luatype);
if param["type"] == "String":
wrappersHeaderOut += "\t%s %s = String(%s(L, %d));\n" % (param["type"], param["name"], luafunc, idx)
else:
wrappersHeaderOut += "\t%s %s = %s(L, %d)%s;\n" % (param["type"], param["name"], luafunc, idx,luafuncsuffix)
else:
wrappersHeaderOut += "\tluaL_checktype(L, %d, %s);\n" % (idx, luatype);
if param["type"] == "String":
wrappersHeaderOut += "\t%s %s = String(%s(L, %d));\n" % (param["type"], param["name"], luafunc, idx)
else:
wrappersHeaderOut += "\t%s %s = %s(L, %d)%s;\n" % (param["type"], param["name"], luafunc, idx, luafuncsuffix)
paramlist.append(param["name"])
lparamlist.append(param["name"]+lend)
idx = idx +1 # Param parse success-- mark the increased stack
# Generate C++-side method call / generate return value
if pm["name"] == ckey: # If constructor
if ckey == "EventHandler": # See LuaEventHandler above
wrappersHeaderOut += "\tLuaEventHandler *inst = new LuaEventHandler();\n"
wrappersHeaderOut += "\tinst->wrapperIndex = luaL_ref(L, LUA_REGISTRYINDEX );\n"
wrappersHeaderOut += "\tinst->L = L;\n"
else:
wrappersHeaderOut += "\t%s *inst = new %s(%s);\n" % (ckey, ckey, ", ".join(paramlist))
wrappersHeaderOut += "\tPolyBase **userdataPtr = (PolyBase**)lua_newuserdata(L, sizeof(PolyBase*));\n"
wrappersHeaderOut += "\t*userdataPtr = (PolyBase*)inst;\n"
wrappersHeaderOut += "\tluaL_getmetatable(L, \"%s.%s\");\n" % (libName, ckey)
wrappersHeaderOut += "\tlua_setmetatable(L, -2);\n"
wrappersHeaderOut += "\treturn 1;\n"
else: #If non-constructor
if pm["rtnType"].find("static ") == -1: # If non-static
call = "inst->%s(%s)" % (pm["name"], ", ".join(paramlist))
else: # If static (FIXME: Why doesn't this work?)
call = "%s::%s(%s)" % (ckey, pm["name"], ", ".join(paramlist))
#check if returning a template
if pm["rtnType"].find("<") > -1:
#if returning a vector, convert to lua table
if pm["rtnType"].find("std::vector") > -1:
vectorReturnClass = pm["rtnType"].replace("std::vector<", "").replace(">","").replace(" ", "")
if vectorReturnClass.find("&") == -1 and vectorReturnClass.find("*") > -1: #FIXME: return references to std::vectors and basic types
vectorReturn = True
wrappersHeaderOut += "\tstd::vector<%s> retVector = %s;\n" % (vectorReturnClass,call)
wrappersHeaderOut += "\tlua_newtable(L);\n"
wrappersHeaderOut += "\tfor(int i=0; i < retVector.size(); i++) {\n"
wrappersHeaderOut += "\t\tPolyBase **userdataPtr = (PolyBase**)lua_newuserdata(L, sizeof(PolyBase*));\n"
wrappersHeaderOut += "\t\t*userdataPtr = (PolyBase*)retVector[i];\n"
wrappersHeaderOut += "\t\tlua_rawseti(L, -2, i+1);\n"
wrappersHeaderOut += "\t}\n"
wrappersHeaderOut += "\treturn 1;\n"
else:
wrappersHeaderOut += "\treturn 0;\n"
# else If void-typed:
elif pm["rtnType"] == "void" or pm["rtnType"] == "static void" or pm["rtnType"] == "virtual void" or pm["rtnType"] == "inline void":
wrappersHeaderOut += "\t%s;\n" % (call)
basicType = True
voidRet = True
vectorReturn = False
wrappersHeaderOut += "\treturn 0;\n" # 0 arguments returned
else: # If there is a return value:
# What type is the return value? Default to pointer
outfunc = "this_shouldnt_happen"
retFunc = ""
basicType = False
vectorReturn = False
if pm["rtnType"] == "Number" or pm["rtnType"] == "inline Number":
outfunc = "lua_pushnumber"
basicType = True
if pm["rtnType"] == "String" or pm["rtnType"] == "static String": # TODO: Path for STL strings?
outfunc = "lua_pushstring"
basicType = True
retFunc = ".c_str()"
if pm["rtnType"] == "int" or pm["rtnType"] == "unsigned int" or pm["rtnType"] == "static int" or pm["rtnType"] == "size_t" or pm["rtnType"] == "static size_t" or pm["rtnType"] == "long" or pm["rtnType"] == "unsigned int" or pm["rtnType"] == "static long" or pm["rtnType"] == "short" or pm["rtnType"] == "PolyKEY" or pm["rtnType"] == "wchar_t":
outfunc = "lua_pushinteger"
basicType = True
if pm["rtnType"] == "bool" or pm["rtnType"] == "static bool" or pm["rtnType"] == "virtual bool":
outfunc = "lua_pushboolean"
basicType = True
if pm["rtnType"].find("*") > -1: # Returned var is definitely a pointer.
wrappersHeaderOut += "\tPolyBase *ptrRetVal = (PolyBase*)%s%s;\n" % (call, retFunc)
wrappersHeaderOut += "\tif(ptrRetVal == NULL) {\n"
wrappersHeaderOut += "\t\tlua_pushnil(L);\n"
wrappersHeaderOut += "\t} else {\n"
wrappersHeaderOut += "\t\tPolyBase **userdataPtr = (PolyBase**)lua_newuserdata(L, sizeof(PolyBase*));\n"
wrappersHeaderOut += "\t\t*userdataPtr = ptrRetVal;\n"
wrappersHeaderOut += "\t}\n"
elif basicType == True: # Returned var has been flagged as a recognized primitive type
wrappersHeaderOut += "\t%s(L, %s%s);\n" % (outfunc, call, retFunc)
else: # Some static object is being returned. Convert it to a pointer, then return that.
className = pm["rtnType"].replace("const", "").replace("&", "").replace("inline", "").replace("virtual", "").replace("static", "")
if className == "Polygon": # Deal with potential windows.h conflict
className = "Polycode::Polygon"
if className == "Rectangle":
className = "Polycode::Rectangle"
if className == "Polycode : : Rectangle":
className = "Polycode::Rectangle"
wrappersHeaderOut += "\t%s *retInst = new %s();\n" % (className, className)
wrappersHeaderOut += "\t*retInst = %s;\n" % (call)
wrappersHeaderOut += "\tPolyBase **userdataPtr = (PolyBase**)lua_newuserdata(L, sizeof(PolyBase*));\n"
wrappersHeaderOut += "\tluaL_getmetatable(L, \"%s.%s\");\n" % (libName, className)
wrappersHeaderOut += "\tlua_setmetatable(L, -2);\n"
wrappersHeaderOut += "\t*userdataPtr = (PolyBase*)retInst;\n"
wrappersHeaderOut += "\treturn 1;\n"
wrappersHeaderOut += "}\n\n" # Close out C++ generation
# Now generate the Lua side method.
if rawMethod:
luaClassBindingOut += "function %s:%s(...)\n" % (ckey, pm["name"])
luaClassBindingOut += "\treturn %s.%s_%s(self.__ptr, ...)\n" % (libName, ckey, pm["name"])
luaClassBindingOut += "end\n"
elif pm["name"] == ckey: # Constructors
luaClassBindingOut += "function %s:%s(...)\n" % (ckey, ckey)
luaClassBindingOut += "\tlocal arg = {...}\n"
if inherits:
luaClassBindingOut += "\tif type(arg[1]) == \"table\" and count(arg) == 1 then\n"
luaClassBindingOut += "\t\tif \"\"..arg[1].__classname == \"%s\" then\n" % (c["inherits"][0]["class"])
luaClassBindingOut += "\t\t\tself.__ptr = arg[1].__ptr\n"
luaClassBindingOut += "\t\t\treturn\n"
luaClassBindingOut += "\t\tend\n"
luaClassBindingOut += "\tend\n"
luaClassBindingOut += "\tfor k,v in pairs(arg) do\n"
luaClassBindingOut += "\t\tif type(v) == \"table\" then\n"
luaClassBindingOut += "\t\t\tif v.__ptr ~= nil then\n"
luaClassBindingOut += "\t\t\t\targ[k] = v.__ptr\n"
luaClassBindingOut += "\t\t\tend\n"
luaClassBindingOut += "\t\tend\n"
luaClassBindingOut += "\tend\n"
luaClassBindingOut += "\tif self.__ptr == nil and arg[1] ~= \"__skip_ptr__\" then\n"
if ckey == "EventHandler": # See LuaEventHandler above
luaClassBindingOut += "\t\tself.__ptr = %s.%s(self)\n" % (libName, ckey)
else:
luaClassBindingOut += "\t\tself.__ptr = %s.%s(unpack(arg))\n" % (libName, ckey)
luaClassBindingOut += "\tend\n"
luaClassBindingOut += "end\n\n"
else: # Non-constructors.
if pm["rtnType"].find("static ") == -1: # Non-static method
luaClassBindingOut += "function %s:%s(%s)\n" % (ckey, pm["name"], ", ".join(paramlist))
if len(lparamlist):
luaClassBindingOut += "\tlocal retVal = %s.%s_%s(self.__ptr, %s)\n" % (libName, ckey, pm["name"], ", ".join(lparamlist))
else:
luaClassBindingOut += "\tlocal retVal = %s.%s_%s(self.__ptr)\n" % (libName, ckey, pm["name"])
else: # Static method
luaClassBindingOut += "function %s.%s(%s)\n" % (ckey, pm["name"], ", ".join(paramlist))
if len(lparamlist):
luaClassBindingOut += "\tlocal retVal = %s.%s_%s(%s)\n" % (libName, ckey, pm["name"], ", ".join(lparamlist))
else:
luaClassBindingOut += "\tlocal retVal = %s.%s_%s()\n" % (libName, ckey, pm["name"])
if not voidRet: # Was there a return value?
if basicType == True: # Yes, a primitive
luaClassBindingOut += "\treturn retVal\n"
else: # Yes, a pointer was returned
if vectorReturn == True:
className = vectorReturnClass.replace("*", "")
luaClassBindingOut += template_returnPtrLookupArray("\t",template_quote(className),"retVal")
else:
className = pm["rtnType"].replace("const", "").replace("&", "").replace("inline", "").replace("virtual", "").replace("static", "").replace("*","").replace(" ", "")
luaClassBindingOut += template_returnPtrLookup("\t",template_quote(className),"retVal")
luaClassBindingOut += "end\n\n" # Close out Lua generation
parsed_methods.append(pm["name"]) # Method parse success
luaDocOut += "\t\t</methods>\n"
# With methods out of the way, do some final cleanup:
# user pointer metatable creation in C++
cppLoaderOut += "\n\tluaL_newmetatable(L, \"%s.%s\");\n" % (libName, ckey)
if ckey not in disable_gc:
cppLoaderOut += "\tlua_pushstring(L, \"__gc\");\n"
cppLoaderOut += "\tlua_pushcfunction(L, %s_delete_%s);\n" % (libName, ckey)
cppLoaderOut += "\tlua_settable(L, -3);\n"
cppLoaderOut +="\tlua_pop(L, 1);\n"
# Delete method (C++ side)
cppRegisterOut += "\t\t{\"delete_%s\", %s_delete_%s},\n" % (ckey, libName, ckey)
wrappersHeaderOut += "static int %s_delete_%s(lua_State *L) {\n" % (libName, ckey)
wrappersHeaderOut += "\tluaL_checktype(L, 1, LUA_TUSERDATA);\n"
wrappersHeaderOut += "\tPolyBase **inst = (PolyBase**)lua_touserdata(L, 1);\n"
wrappersHeaderOut += "\tdelete ((%s*) *inst);\n" % (ckey)
wrappersHeaderOut += "\t*inst = NULL;\n"
wrappersHeaderOut += "\treturn 0;\n"
wrappersHeaderOut += "}\n\n"
# Delete method (Lua side)
luaClassBindingOut += "function %s:__delete()\n" % (ckey)
luaClassBindingOut += "\tif self then %s.delete_%s(self.__ptr) end\n" % (libName, ckey)
luaClassBindingOut += "end\n"
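				# Net effect of the block above (descriptive note): every wrapped userdata gets a
				# metatable named "<libName>.<class>"; unless the class is listed in disable_gc its
				# __gc slot calls <libName>_delete_<class>, and Lua code can also free an instance
				# explicitly through the generated <class>:__delete() helper.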
# Add class to lua index file
luaIndexOut += "require \"%s/%s\"\n" % (prefix, ckey)
# Write lua file
mkdir_p(apiClassPath)
if ckey != "EventDispatcher":
fout = open("%s/%s.lua" % (apiClassPath, ckey), "w")
fout.write(luaClassBindingOut)
luaDocOut += "\t</class>\n"
except CppHeaderParser.CppParseError as e: # One input file parse; failed.
print(e)
sys.exit(1)
luaDocOut += "</classes>\n"
luaDocOut += "</docs>\n"
# Footer boilerplate for wrappersHeaderOut and cppRegisterOut.
wrappersHeaderOut += "} // namespace Polycode\n"
cppRegisterOut += "\t\t{NULL, NULL}\n"
cppRegisterOut += "\t};\n"
cppRegisterOut += "\tluaL_openlib(L, \"%s\", %sLib, 0);\n" % (libName, libSmallName)
cppRegisterOut += cppLoaderOut
cppRegisterOut += "\treturn 1;\n"
cppRegisterOut += "}"
cppRegisterHeaderOut = "" # Def: Global C++ *LUA.h
cppRegisterHeaderOut += "#pragma once\n"
cppRegisterHeaderOut += "#include <%s>\n" % (mainInclude)
cppRegisterHeaderOut += "extern \"C\" {\n"
cppRegisterHeaderOut += "#include <stdio.h>\n"
cppRegisterHeaderOut += "#include \"lua.h\"\n"
cppRegisterHeaderOut += "#include \"lualib.h\"\n"
cppRegisterHeaderOut += "#include \"lauxlib.h\"\n"
cppRegisterHeaderOut += "int _PolyExport luaopen_%s(lua_State *L);\n" % (prefix)
cppRegisterHeaderOut += "}\n"
# Write out global files
mkdir_p(includePath)
mkdir_p(apiPath)
mkdir_p(sourcePath)
fout = open("%s/%sLUA.h" % (includePath, prefix), "w")
fout.write(cppRegisterHeaderOut)
if luaDocPath is None:
luaDocPath = "../../../Documentation/Lua/xml"
if luaDocPath != "-":
fout = open("%s/%s.xml" % (luaDocPath, prefix), "w")
fout.write(luaDocOut)
fout = open("%s/%s.lua" % (apiPath, prefix), "w")
fout.write(luaIndexOut)
fout = open("%s/%sLUAWrappers.h" % (includePath, prefix), "w")
fout.write(wrappersHeaderOut)
fout = open("%s/%sLUA.cpp" % (sourcePath, prefix), "w")
fout.write(cppRegisterOut)
# Create .pak zip archive
pattern = '*.lua'
os.chdir(apiPath)
if libName == "Polycore":
with ZipFile("api.pak", 'w') as myzip:
for root, dirs, files in os.walk("."):
for filename in fnmatch.filter(files, pattern):
myzip.write(os.path.join(root, filename))
else:
with ZipFile("%s.pak" % (libName), 'w') as myzip:
for root, dirs, files in os.walk("."):
for filename in fnmatch.filter(files, pattern):
myzip.write(os.path.join(root, filename))
if len(sys.argv) < 10:
print ("Usage:\n%s [input path] [prefix] [main include] [lib small name] [lib name] [api path] [api class-path] [include path] [source path] [lua doc path (optional) (or - for omit)] [inherit-in-module-file path (optional)]" % (sys.argv[0]))
sys.exit(1)
else:
createLUABindings(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5], sys.argv[6], sys.argv[7], sys.argv[8], sys.argv[9], sys.argv[10] if len(sys.argv)>10 else None, sys.argv[11] if len(sys.argv)>11 else None)
| mit | -3,718,816,339,603,664,400 | 47.559579 | 524 | 0.593596 | false |
Ledoux/ShareYourSystem | Pythonlogy/draft/Simulaters/Brianer/draft/03_ExampleDoc.py | 2 | 1598 | #ImportModules
import ShareYourSystem as SYS
#Definition
MyBrianer=SYS.BrianerClass(
).set(
'-Populations',
[
(
'ManagingBeforeSetVariable',
{
'NeurongroupingBrianKwargVariablesDict':
{
'model':
'''
dv/dt = (ge+gi-(v+49*mV))/(20*ms) : volt
dge/dt = -ge/(5*ms) : volt
dgi/dt = -gi/(10*ms) : volt
''',
'threshold':'v>-50*mV',
'reset':'v=-60*mV'
},
'get':'/-Spikes/|Run',
}
),
(
'set',
{
'#liarg:#lambda':{
'|#NeuronStr':{
'SimulatingUnitsInt':3200,
'array':[
[
['-<->'],
['|Postlets<->Prelets'],
['|#direct:_^_|E','|#direct:_^_|I']
],
[
{},
{},
{
'SynapsingBrianKwargVariablesDict':{'pre':'#PreStr'},
'SynapsingProbabilityVariable':0.02
}
]
]
}
},
'#map':[
['#NeuronStr','#PreStr'],
[
['E','ge+=1.62*mV'],
['I','gi-=9*mV']
]
]
}
)
]
).network(
['Populations']
)
#print
print('MyBrianer is ')
SYS._print(MyBrianer)
"""
.brian(
)
#init
import brian2
map(
lambda __BrianedNeuronGroup:
__BrianedNeuronGroup.__setattr__(
'v',
-60*brian2.mV
),
MyBrianer.BrianedNeuronGroupsList
)
#run
MyBrianer.run(300)
#plot
ME=MyBrianer['/-Populations/|E/-Spikes/|Run'].SpikeMonitor
MI=MyBrianer['/-Populations/|I/-Spikes/|Run'].SpikeMonitor
from matplotlib import pyplot
pyplot.plot(ME.t/brian2.ms, ME.i, 'r.')
pyplot.plot(MI.t/brian2.ms, ME.source.N+MI.i, 'b.')
pyplot.show()
"""
| mit | 1,122,958,059,654,174,000 | 15.821053 | 63 | 0.500626 | false |
agriggio/pysmt | pysmt/solvers/eager.py | 2 | 3297 | #
# This file is part of pySMT.
#
# Copyright 2014 Andrea Micheli and Marco Gario
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pysmt.solvers.solver import Model
from pysmt.environment import get_env
from pysmt.exceptions import PysmtTypeError
class EagerModel(Model):
"""A model that does not require the existence of a solver instance.
This is useful when we want to change the state of the solver but
maintain a version of the previously found model. An EagerModel
can also be constructed manually, and provides a simple way to
define a model.
"""
def __init__(self, assignment, environment=None):
if environment is None:
environment = get_env()
Model.__init__(self, environment)
self.environment = environment
self.assignment = dict(assignment)
# Create a copy of the assignments to memoize completions
self.completed_assignment = dict(self.assignment)
def get_value(self, formula, model_completion=True):
if model_completion:
syms = formula.get_free_variables()
self._complete_model(syms)
r = formula.substitute(self.completed_assignment)
else:
r = formula.substitute(self.assignment)
res = r.simplify()
if not res.is_constant():
raise PysmtTypeError("Was expecting a constant but got %s" % res)
return res
def _complete_model(self, symbols):
undefined_symbols = (s for s in symbols
if s not in self.completed_assignment)
mgr = self.environment.formula_manager
for s in undefined_symbols:
if not s.is_symbol():
                raise PysmtTypeError("Was expecting a symbol but got %s" % s)
if s.symbol_type().is_bool_type():
value = mgr.Bool(False)
elif s.symbol_type().is_real_type():
value = mgr.Real(0)
elif s.symbol_type().is_int_type():
value = mgr.Int(0)
elif s.symbol_type().is_bv_type():
value = mgr.BVZero(s.bv_width())
else:
raise PysmtTypeError("Unhandled type for %s: %s" %
(s, s.symbol_type()))
self.completed_assignment[s] = value
def iterator_over(self, language):
for x in language:
yield x, self.get_value(x, model_completion=True)
def __iter__(self):
"""Overloading of iterator from Model. We iterate only on the
variables defined in the assignment.
"""
return iter(self.assignment.items())
def __contains__(self, x):
"""Returns whether the model contains a value for 'x'."""
return x in self.assignment
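# Illustrative usage sketch (not part of the original module; assumes the usual
# pysmt.shortcuts API):
#
#     from pysmt.shortcuts import Symbol, And, TRUE, FALSE
#     x, y = Symbol("x"), Symbol("y")            # Symbols default to Boolean type
#     model = EagerModel(assignment={x: TRUE(), y: FALSE()})
#     model.get_value(And(x, y))                 # -> FALSE(), via substitute + simplify
#     model.get_value(Symbol("z"))               # unknown Booleans are completed with False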
| apache-2.0 | -1,543,510,063,000,445,000 | 35.633333 | 77 | 0.622081 | false |
pymedusa/SickRage | ext/tmdbsimple/tv.py | 1 | 20917 | # -*- coding: utf-8 -*-
"""
tmdbsimple.tv
~~~~~~~~~~~~~
This module implements the TV, TV Seasons, TV Episodes, and Networks
functionality of tmdbsimple.
Created by Celia Oakley on 2013-10-31.
:copyright: (c) 2013-2020 by Celia Oakley
:license: GPLv3, see LICENSE for more details
"""
from .base import TMDB
class TV(TMDB):
"""
TV functionality.
See: https://developers.themoviedb.org/3/tv
"""
BASE_PATH = 'tv'
URLS = {
'info': '/{id}',
'alternative_titles': '/{id}/alternative_titles',
'content_ratings': '/{id}/content_ratings',
'credits': '/{id}/credits',
'external_ids': '/{id}/external_ids',
'images': '/{id}/images',
'rating': '/{id}/rating',
'similar': '/{id}/similar',
'recommendations': '/{id}/recommendations',
'translations': '/{id}/translations',
'videos': '/{id}/videos',
'keywords': '/{id}/keywords',
'latest': '/latest',
'on_the_air': '/on_the_air',
'airing_today': '/airing_today',
'top_rated': '/top_rated',
'popular': '/popular',
}
def __init__(self, id=0):
super(TV, self).__init__()
self.id = id
def info(self, **kwargs):
"""
Get the primary information about a TV series by id.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any TV series
method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def alternative_titles(self, **kwargs):
"""
Get the alternative titles for a specific tv id.
Args:
language: (optional) ISO 3166-1 code.
append_to_response: (optional) Comma separated, any tv method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('alternative_titles')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def content_ratings(self, **kwargs):
"""
Get the content ratings for a TV Series.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any collection
method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('content_ratings')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def credits(self, **kwargs):
"""
Get the cast & crew information about a TV series. Just like the
website, we pull this information from the last season of the series.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any collection
method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids that we have stored for a TV series.
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images (posters and backdrops) for a TV series.
Args:
language: (optional) ISO 639 code.
include_image_language: (optional) Comma separated, a valid
                ISO 639-1.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def rating(self, **kwargs):
"""
This method lets users rate a TV show. A valid session id or guest
session id is required.
Args:
session_id: see Authentication.
guest_session_id: see Authentication.
value: Rating value.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('rating')
payload = {
'value': kwargs.pop('value', None),
}
response = self._POST(path, kwargs, payload)
self._set_attrs_to_values(response)
return response
def similar(self, **kwargs):
"""
Get the similar TV series for a specific TV series id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any TV method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('similar')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def recommendations(self, **kwargs):
"""
Get the recommendations for TV series for a specific TV series id.
Args:
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('recommendations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def translations(self, **kwargs):
"""
Get the list of translations that exist for a TV series. These
translations cascade down to the episode level.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('translations')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def videos(self, **kwargs):
"""
Get the videos that have been added to a TV series (trailers, opening
credits, etc...).
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('videos')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def keywords(self, **kwargs):
"""
Get the list of keywords related to a TV series.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('keywords')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def latest(self, **kwargs):
"""
Get the most newly created TV show. This is a live response
and will continuously change.
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('latest')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def on_the_air(self, **kwargs):
"""
Get the list of TV shows that are currently on the air. This query
looks for any TV show that has an episode with an air date in the
next 7 days.
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('on_the_air')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def airing_today(self, **kwargs):
"""
Get the list of TV shows that air today. Without a specified timezone,
this query defaults to EST (Eastern Time UTC-05:00).
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
timezone: (optional) Valid value from the list of timezones.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('airing_today')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def top_rated(self, **kwargs):
"""
Get the list of top rated TV shows. By default, this list will only
include TV shows that have 2 or more votes. This list refreshes every
day.
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('top_rated')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def popular(self, **kwargs):
"""
Get the list of popular TV shows. This list refreshes every day.
Args:
page: (optional) Minimum 1, maximum 1000.
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('popular')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
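# Illustrative usage sketch (not part of the original module; assumes an API key
# has been configured, e.g. tmdbsimple.API_KEY = '<your key>', and 1396 is just an
# example series id):
#
#     tv = TV(1396)
#     response = tv.info()
#     print(tv.name)        # _set_attrs_to_values() exposes the JSON fields as attributes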
class TV_Seasons(TMDB):
"""
TV Seasons functionality.
See: https://developers.themoviedb.org/3/tv-seasons
"""
BASE_PATH = 'tv/{tv_id}/season/{season_number}'
URLS = {
'info': '',
'credits': '/credits',
'external_ids': '/external_ids',
'images': '/images',
'videos': '/videos',
}
def __init__(self, tv_id, season_number):
super(TV_Seasons, self).__init__()
self.tv_id = tv_id
self.season_number = season_number
def info(self, **kwargs):
"""
Get the primary information about a TV season by its season number.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any TV series
method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_tv_id_season_number_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def credits(self, **kwargs):
"""
Get the cast & crew credits for a TV season by season number.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_tv_id_season_number_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids that we have stored for a TV season by season
number.
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_tv_id_season_number_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images (posters) that we have stored for a TV season by season
number.
Args:
language: (optional) ISO 639 code.
include_image_language: (optional) Comma separated, a valid
                ISO 639-1.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_tv_id_season_number_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def videos(self, **kwargs):
"""
Get the videos that have been added to a TV season (trailers, teasers,
etc...).
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_tv_id_season_number_path('videos')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class TV_Episodes(TMDB):
"""
TV Episodes functionality.
See: https://developers.themoviedb.org/3/tv-episodes
"""
BASE_PATH = 'tv/{tv_id}/season/{season_number}/episode/{episode_number}'
URLS = {
'info': '',
'credits': '/credits',
'external_ids': '/external_ids',
'images': '/images',
'rating': '/rating',
'videos': '/videos',
}
def __init__(self, tv_id, season_number, episode_number):
super(TV_Episodes, self).__init__()
self.tv_id = tv_id
self.season_number = season_number
self.episode_number = episode_number
def info(self, **kwargs):
"""
Get the primary information about a TV episode by combination of a
season and episode number.
Args:
language: (optional) ISO 639 code.
append_to_response: (optional) Comma separated, any TV series
method.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_tv_id_season_number_episode_number_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def credits(self, **kwargs):
"""
Get the TV episode credits by combination of season and episode number.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_tv_id_season_number_episode_number_path('credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids for a TV episode by combination of a season and
episode number.
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_tv_id_season_number_episode_number_path(
'external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images (episode stills) for a TV episode by combination of a
season and episode number. Since episode stills don't have a language,
this call will always return all images.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_tv_id_season_number_episode_number_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def rating(self, **kwargs):
"""
This method lets users rate a TV episode. A valid session id or guest
session id is required.
Args:
session_id: see Authentication.
guest_session_id: see Authentication.
value: Rating value.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_tv_id_season_number_episode_number_path('rating')
payload = {
'value': kwargs.pop('value', None),
}
response = self._POST(path, kwargs, payload)
self._set_attrs_to_values(response)
return response
def videos(self, **kwargs):
"""
Get the videos that have been added to a TV episode (teasers, clips,
etc...).
Args:
language: (optional) ISO 639 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_tv_id_season_number_episode_number_path('videos')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class TV_Changes(TMDB):
"""
Changes functionality for TV Series, Season and Episode.
See: https://developers.themoviedb.org/3/tv/get-tv-changes
https://developers.themoviedb.org/3/tv-seasons/get-tv-season-changes
https://developers.themoviedb.org/3/tv-episodes/get-tv-episode-changes
"""
BASE_PATH = 'tv'
URLS = {
'series': '/{id}/changes', # id => tv_id
'season': '/season/{id}/changes', # id => season_id
'episode': '/episode/{id}/changes', # id => episode_id
}
def __init__(self, id=0):
super(TV_Changes, self).__init__()
self.id = id
def series(self, **kwargs):
"""
Get the changes for a specific series id.
Changes are grouped by key, and ordered by date in descending order.
By default, only the last 24 hours of changes are returned. The
maximum number of days that can be returned in a single request is 14.
Args:
start_date: (optional) Expected format is 'YYYY-MM-DD'.
end_date: (optional) Expected format is 'YYYY-MM-DD'.
page: (optional) Minimum 1, maximum 1000.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('series')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def season(self, **kwargs):
"""
Get the changes for a specific season id.
Changes are grouped by key, and ordered by date in descending order.
By default, only the last 24 hours of changes are returned. The
maximum number of days that can be returned in a single request is 14.
Args:
start_date: (optional) Expected format is 'YYYY-MM-DD'.
end_date: (optional) Expected format is 'YYYY-MM-DD'.
page: (optional) Minimum 1, maximum 1000.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('season')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def episode(self, **kwargs):
"""
Get the changes for a specific episode id.
Changes are grouped by key, and ordered by date in descending order.
By default, only the last 24 hours of changes are returned. The
maximum number of days that can be returned in a single request is 14.
Args:
start_date: (optional) Expected format is 'YYYY-MM-DD'.
end_date: (optional) Expected format is 'YYYY-MM-DD'.
page: (optional) Minimum 1, maximum 1000.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('episode')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
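# Illustrative usage sketch (assumptions as above: API key configured, ids are examples):
#
#     changes = TV_Changes(1396)
#     changes.series(start_date='2016-01-01', end_date='2016-01-14')   # max 14-day window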
class Networks(TMDB):
"""
Networks functionality.
See: https://developers.themoviedb.org/3/networks
"""
BASE_PATH = 'network'
URLS = {
'info': '/{id}',
}
def __init__(self, id):
super(Networks, self).__init__()
self.id = id
def info(self, **kwargs):
"""
This method is used to retrieve the basic information about a TV
        network. You can use this ID to search for TV shows with the discover
        method.
At this time we don't have much but this will be fleshed out over time.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
| gpl-3.0 | 5,543,613,008,889,226,000 | 29.053161 | 79 | 0.57011 | false |
garyfeng/pybrain | pybrain/rl/environments/mazes/tasks/pomdp.py | 31 | 1465 | __author__ = 'Tom Schaul, [email protected]'
from scipy import ndarray
from pybrain.rl.environments import EpisodicTask
from pybrain.utilities import Named, drawIndex
class POMDPTask(EpisodicTask, Named):
""" Partially observable episodic MDP (with discrete actions)
Has actions that can be performed, and observations in every state.
By default, the observation is a vector, and the actions are integers.
"""
# number of observations
observations = 4
# number of possible actions
actions = 4
# maximal number of steps before the episode is stopped
maxSteps = None
# the lower bound on the reward value
minReward = 0
def __init__(self, **args):
self.setArgs(**args)
self.steps = 0
@property
def indim(self):
return self.actions
@property
def outdim(self):
return self.observations
def reset(self):
self.steps = 0
EpisodicTask.reset(self)
def isFinished(self):
if self.maxSteps != None:
return self.steps >= self.maxSteps
return False
    def performAction(self, action):
        """ POMDP tasks, as they have discrete actions, can be used by providing either an index,
        or an array with a 1-in-n coding (which can be stochastic). """
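        # Illustrative call forms (hypothetical task with 4 actions; not part of the
        # original module):
        #   task.performAction(2)                               # plain integer index
        #   task.performAction(numpy.array([0., 0., 1., 0.]))   # 1-in-n coding, resolved by drawIndex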
if type(action) == ndarray:
action = drawIndex(action, tolerant = True)
self.steps += 1
EpisodicTask.performAction(self, action) | bsd-3-clause | -9,210,264,471,081,991,000 | 26.660377 | 97 | 0.649829 | false |
ESOedX/edx-platform | lms/djangoapps/support/views/index.py | 1 | 1900 | """
Index view for the support app.
"""
from __future__ import absolute_import
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from edxmako.shortcuts import render_to_response
from support.decorators import require_support_permission
SUPPORT_INDEX_URLS = [
{
"url": reverse_lazy("support:certificates"),
"name": _("Certificates"),
"description": _("View and regenerate certificates."),
},
# DEPRECATION WARNING: We can remove this end-point
# once shoppingcart has been replaced by the E-Commerce service.
{
"url": reverse_lazy("support:refund"),
"name": _("Manual Refund"),
"description": _("Track refunds issued directly through CyberSource."),
},
{
"url": reverse_lazy("support:enrollment"),
"name": _("Enrollment"),
"description": _("View and update learner enrollments."),
},
{
"url": reverse_lazy("support:manage_user"),
"name": _("Manage User"),
"description": _("Disable User Account"),
},
{
"url": reverse_lazy("support:course_entitlement"),
"name": _("Entitlements"),
"description": _("View, create, and reissue learner entitlements"),
},
{
"url": reverse_lazy("support:feature_based_enrollments"),
"name": _("Feature Based Enrollments"),
"description": _("View feature based enrollment settings"),
},
{
"url": reverse_lazy("support:link_program_enrollments"),
"name": _("Link Program Enrollments"),
"description": _("Link LMS users to program enrollments"),
},
]
@require_support_permission
def index(request): # pylint: disable=unused-argument
"""Render the support index view. """
context = {
"urls": SUPPORT_INDEX_URLS
}
return render_to_response("support/index.html", context)
| agpl-3.0 | -4,170,745,403,023,848,000 | 30.666667 | 79 | 0.619474 | false |
HaebinShin/tensorflow | tensorflow/python/kernel_tests/scatter_ops_test.py | 8 | 6156 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyAdd(ref, indices, updates):
# Since numpy advanced assignment does not support repeated indices,
# we run a simple loop to perform scatter_add.
for i, indx in np.ndenumerate(indices):
ref[indx] += updates[i]
def _NumpySub(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] -= updates[i]
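# Descriptive note (added for clarity, not part of the original test): with numpy
# fancy-index assignment, repeated indices collapse into a single update, e.g.
#   a = np.zeros(3); a[[0, 0]] += 1   # leaves a[0] == 1, not 2
# which is why the explicit loops above are needed to mimic scatter_add/scatter_sub.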
class ScatterTest(tf.test.TestCase):
def _VariableRankTest(self, np_scatter, tf_scatter, vtype, itype, use_gpu,
repeat_indices=False):
np.random.seed(8)
with self.test_session(use_gpu=use_gpu):
for indices_shape in (), (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
# Generate random indices with no duplicates for easy numpy comparison
size = np.prod(indices_shape, dtype=itype)
first_dim = 3 * size
indices = np.arange(first_dim)
np.random.shuffle(indices)
indices = indices[:size]
if size > 1 and repeat_indices:
# Add some random repeats.
indices = indices[:size//2]
for _ in range(size-size//2):
# Randomly append some repeats.
indices = np.append(indices, indices[np.random.randint(size//2)])
np.random.shuffle(indices)
indices = indices.reshape(indices_shape)
updates = _AsType(np.random.randn(*(indices_shape + extra_shape)),
vtype)
old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype)
# Scatter via numpy
new = old.copy()
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref = tf.Variable(old)
ref.initializer.run()
tf_scatter(ref, indices, updates).eval()
# Compare
self.assertAllClose(ref.eval(), new)
def _VariableRankTests(self, np_scatter, tf_scatter):
for vtype in (np.float32, np.float64):
for itype in (np.int32, np.int64):
for use_gpu in (False, True):
self._VariableRankTest(np_scatter, tf_scatter, vtype, itype, use_gpu)
def testVariableRankUpdate(self):
def update(ref, indices, updates):
ref[indices] = updates
self._VariableRankTests(update, tf.scatter_update)
def testVariableRankAdd(self):
self._VariableRankTests(_NumpyAdd, tf.scatter_add)
def testVariableRankSub(self):
self._VariableRankTests(_NumpySub, tf.scatter_sub)
def _ScatterRepeatIndicesTest(self, np_scatter, tf_scatter):
for vtype in (np.float32, np.float64):
for itype in (np.int32, np.int64):
for use_gpu in (False, True):
self._VariableRankTest(np_scatter, tf_scatter, vtype, itype, use_gpu,
repeat_indices=True)
  def testScatterRepeatIndices(self):
    """This tests scatter_add and scatter_sub using indices that repeat."""
self._ScatterRepeatIndicesTest(_NumpyAdd, tf.scatter_add)
self._ScatterRepeatIndicesTest(_NumpySub, tf.scatter_sub)
def testBooleanScatterUpdate(self):
with self.test_session(use_gpu=False) as session:
var = tf.Variable([True, False])
update0 = tf.scatter_update(var, 1, True)
update1 = tf.scatter_update(var, tf.constant(0, dtype=tf.int64), False)
var.initializer.run()
session.run([update0, update1])
self.assertAllEqual([False, True], var.eval())
def testScatterOutOfRangeCpu(self):
for op in (tf.scatter_add, tf.scatter_sub, tf.scatter_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
with self.test_session(use_gpu=False):
ref = tf.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
# Test some out of range errors.
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(r'indices\[0\] = -1 is not in \[0, 6\)'):
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = 6 is not in \[0, 6\)'):
op(ref, indices, updates).eval()
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if not tf.test.IsBuiltWithCuda():
return
for op in (tf.scatter_add, tf.scatter_sub, tf.scatter_update):
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
# We don't test the implementation; just test there's no failures.
with self.test_session(force_gpu=True):
ref = tf.Variable(params)
ref.initializer.run()
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
op(ref, indices, updates).eval()
        # Indices out of range should not fail.
indices = np.array([-1, 0, 5])
op(ref, indices, updates).eval()
indices = np.array([2, 0, 6])
op(ref, indices, updates).eval()
if __name__ == "__main__":
tf.test.main()
| apache-2.0 | -4,363,985,112,631,941,000 | 36.766871 | 80 | 0.624919 | false |
dcbaker/alot | alot/completion.py | 1 | 22592 | # Copyright (C) 2011-2012 Patrick Totzke <[email protected]>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import abc
import argparse
import glob
import logging
import os
import re
from . import crypto
from . import commands
from .buffers import EnvelopeBuffer
from .settings import settings
from .utils.booleanaction import BooleanAction
from .helper import split_commandline
from .addressbook import AddressbookError
from .errors import CompletionError
class Completer(object):
"""base class for completers"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def complete(self, original, pos):
"""returns a list of completions and cursor positions for the
string original from position pos on.
:param original: the string to complete
:type original: str
:param pos: starting position to complete from
:type pos: int
:returns: pairs of completed string and cursor position in the
new string
:rtype: list of (str, int)
:raises: :exc:`CompletionError`
"""
pass
def relevant_part(self, original, pos, sep=' '):
"""
        calculates the subword in a `sep`-split list of substrings of
        `original` that `pos` is in.
"""
start = original.rfind(sep, 0, pos) + 1
end = original.find(sep, pos - 1)
if end == -1:
end = len(original)
return original[start:end], start, end, pos - start
class StringlistCompleter(Completer):
"""completer for a fixed list of strings"""
def __init__(self, resultlist, ignorecase=True, match_anywhere=False):
"""
:param resultlist: strings used for completion
:type resultlist: list of str
        :param ignorecase: ignore case when matching against the prefix
        :type ignorecase: bool
        :param match_anywhere: match the prefix anywhere in the string, not
                               only at the beginning
        :type match_anywhere: bool
"""
self.resultlist = resultlist
self.flags = re.IGNORECASE if ignorecase else 0
self.match_anywhere = match_anywhere
def complete(self, original, pos):
pref = original[:pos]
re_prefix = '.*' if self.match_anywhere else ''
def match(s, m):
r = re_prefix + m + '.*'
return re.match(r, s, flags=self.flags) is not None
return [(a, len(a)) for a in self.resultlist if match(a, pref)]
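# Illustrative example of the completion contract (values are made up):
#
#   comp = StringlistCompleter(['inbox', 'important'])
#   comp.complete('im', 2)   # -> [('important', 9)]: completed string plus new cursor position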
class MultipleSelectionCompleter(Completer):
"""
Meta-Completer that turns any Completer into one that deals with a list of
completion strings using the wrapped Completer.
    This makes it easy, for example, to construct a completer for
    comma-separated recipient lists using a :class:`ContactsCompleter`.
"""
def __init__(self, completer, separator=', '):
"""
:param completer: completer to use for individual substrings
:type completer: Completer
:param separator: separator used to split the completion string into
substrings to be fed to `completer`.
:type separator: str
"""
self._completer = completer
self._separator = separator
def relevant_part(self, original, pos):
"""
calculates the subword of `original` that `pos` is in
"""
start = original.rfind(self._separator, 0, pos)
if start == -1:
start = 0
else:
start = start + len(self._separator)
end = original.find(self._separator, pos - 1)
if end == -1:
end = len(original)
return original[start:end], start, end, pos - start
def complete(self, original, pos):
mypart, start, end, mypos = self.relevant_part(original, pos)
res = []
for c, _ in self._completer.complete(mypart, mypos):
newprefix = original[:start] + c
if not original[end:].startswith(self._separator):
newprefix += self._separator
res.append((newprefix + original[end:], len(newprefix)))
return res
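# Illustrative sketch (not part of the original module): wrapping a
# StringlistCompleter so that comma separated lists are completed item by
# item. The tag names used here are made up.
def _example_multiple_selection_completion():
    tags = StringlistCompleter(['inbox', 'important', 'spam'])
    completer = MultipleSelectionCompleter(tags, separator=',')
    # completing the second item of 'inbox,im' (cursor at the end) yields
    # [('inbox,important,', 16)] -- note the appended separator.
    return completer.complete('inbox,im', 8)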
class QueryCompleter(Completer):
"""completion for a notmuch query string"""
def __init__(self, dbman):
"""
        :param dbman: used to look up available tagstrings
:type dbman: :class:`~alot.db.DBManager`
"""
self.dbman = dbman
abooks = settings.get_addressbooks()
self._abookscompleter = AbooksCompleter(abooks, addressesonly=True)
self._tagcompleter = TagCompleter(dbman)
self.keywords = ['tag', 'from', 'to', 'subject', 'attachment',
'is', 'id', 'thread', 'folder']
def complete(self, original, pos):
mypart, start, end, mypos = self.relevant_part(original, pos)
myprefix = mypart[:mypos]
m = re.search(r'(tag|is|to|from):(\w*)', myprefix)
if m:
cmd, _ = m.groups()
            cmdlen = len(cmd) + 1  # length of the keyword part including the colon
if cmd in ['to', 'from']:
localres = self._abookscompleter.complete(mypart[cmdlen:],
mypos - cmdlen)
else:
localres = self._tagcompleter.complete(mypart[cmdlen:],
mypos - cmdlen)
resultlist = []
for ltxt, lpos in localres:
newtext = original[:start] + cmd + ':' + ltxt + original[end:]
newpos = start + len(cmd) + 1 + lpos
resultlist.append((newtext, newpos))
return resultlist
else:
matched = (t for t in self.keywords if t.startswith(myprefix))
resultlist = []
for keyword in matched:
newprefix = original[:start] + keyword + ':'
resultlist.append((newprefix + original[end:], len(newprefix)))
return resultlist
class TagCompleter(StringlistCompleter):
"""complete a tagstring"""
def __init__(self, dbman):
"""
        :param dbman: used to look up available tagstrings
:type dbman: :class:`~alot.db.DBManager`
"""
resultlist = dbman.get_all_tags()
StringlistCompleter.__init__(self, resultlist)
class TagsCompleter(MultipleSelectionCompleter):
"""completion for a comma separated list of tagstrings"""
def __init__(self, dbman):
"""
        :param dbman: used to look up available tagstrings
:type dbman: :class:`~alot.db.DBManager`
"""
self._completer = TagCompleter(dbman)
self._separator = ','
class ContactsCompleter(MultipleSelectionCompleter):
"""completes contacts from given address books"""
def __init__(self, abooks, addressesonly=False):
"""
:param abooks: used to look up email addresses
        :type abooks: list of :class:`~alot.account.AddressBook`
:param addressesonly: only insert address, not the realname of the
contact
:type addressesonly: bool
"""
self._completer = AbooksCompleter(abooks, addressesonly=addressesonly)
self._separator = ', '
class AbooksCompleter(Completer):
"""completes a contact from given address books"""
def __init__(self, abooks, addressesonly=False):
"""
:param abooks: used to look up email addresses
        :type abooks: list of :class:`~alot.account.AddressBook`
:param addressesonly: only insert address, not the realname of the
contact
:type addressesonly: bool
"""
self.abooks = abooks
self.addressesonly = addressesonly
def complete(self, original, pos):
if not self.abooks:
return []
prefix = original[:pos]
res = []
for abook in self.abooks:
try:
res = res + abook.lookup(prefix)
except AddressbookError as e:
raise CompletionError(e)
if self.addressesonly:
returnlist = [(email, len(email)) for (name, email) in res]
else:
returnlist = []
for name, email in res:
if name:
newtext = "%s <%s>" % (name, email)
else:
newtext = email
returnlist.append((newtext, len(newtext)))
return returnlist
class ArgparseOptionCompleter(Completer):
"""completes option parameters for a given argparse.Parser"""
def __init__(self, parser):
"""
:param parser: the option parser we look up parameter and choices from
:type parser: `argparse.ArgumentParser`
"""
self.parser = parser
self.actions = parser._optionals._actions
def complete(self, original, pos):
pref = original[:pos]
res = []
for act in self.actions:
if '=' in pref:
optionstring = pref[:pref.rfind('=') + 1]
# get choices
if 'choices' in act.__dict__:
# TODO: respect prefix
choices = act.choices or []
res = res + [optionstring + a for a in choices]
else:
for optionstring in act.option_strings:
if optionstring.startswith(pref):
# append '=' for options that await a string value
if isinstance(
act, (argparse._StoreAction, BooleanAction)):
optionstring += '='
res.append(optionstring)
return [(a, len(a)) for a in res]
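# Illustrative sketch (not part of the original module): completing optional
# arguments of an argparse parser. The parser and its '--query' flag are made
# up for this example.
def _example_argparse_option_completion():
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--query', action='store')
    completer = ArgparseOptionCompleter(parser)
    # completing '--qu' yields [('--query=', 8)]: store actions await a
    # value, so '=' is appended to the option string.
    return completer.complete('--qu', 4)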
class AccountCompleter(StringlistCompleter):
"""completes users' own mailaddresses"""
def __init__(self, **kwargs):
accounts = settings.get_accounts()
resultlist = ["%s <%s>" % (a.realname, a.address) for a in accounts]
StringlistCompleter.__init__(self, resultlist, match_anywhere=True,
**kwargs)
class CommandNameCompleter(Completer):
"""completes command names"""
def __init__(self, mode):
"""
:param mode: mode identifier
:type mode: str
"""
self.mode = mode
def complete(self, original, pos):
# TODO refine <tab> should get current querystring
commandprefix = original[:pos]
logging.debug('original="%s" prefix="%s"', original, commandprefix)
cmdlist = commands.COMMANDS['global'].copy()
cmdlist.update(commands.COMMANDS[self.mode])
matching = [t for t in cmdlist if t.startswith(commandprefix)]
return [(t, len(t)) for t in matching]
class CommandCompleter(Completer):
"""completes one command consisting of command name and parameters"""
def __init__(self, dbman, mode, currentbuffer=None):
"""
        :param dbman: used to look up available tagstrings
:type dbman: :class:`~alot.db.DBManager`
:param mode: mode identifier
:type mode: str
:param currentbuffer: currently active buffer. If defined, this will be
used to dynamically extract possible completion
strings
:type currentbuffer: :class:`~alot.buffers.Buffer`
"""
self.dbman = dbman
self.mode = mode
self.currentbuffer = currentbuffer
self._commandnamecompleter = CommandNameCompleter(mode)
self._querycompleter = QueryCompleter(dbman)
self._tagcompleter = TagCompleter(dbman)
abooks = settings.get_addressbooks()
self._contactscompleter = ContactsCompleter(abooks)
self._pathcompleter = PathCompleter()
self._accountscompleter = AccountCompleter()
self._secretkeyscompleter = CryptoKeyCompleter(private=True)
self._publickeyscompleter = CryptoKeyCompleter(private=False)
def complete(self, line, pos):
# remember how many preceding space characters we see until the command
# string starts. We'll continue to complete from there on and will add
# these whitespaces again at the very end
whitespaceoffset = len(line) - len(line.lstrip())
line = line[whitespaceoffset:]
pos = pos - whitespaceoffset
words = line.split(' ', 1)
res = []
if pos <= len(words[0]): # we complete commands
for cmd, cpos in self._commandnamecompleter.complete(line, pos):
newtext = ('%s %s' % (cmd, ' '.join(words[1:])))
res.append((newtext, cpos + 1))
else:
cmd, params = words
localpos = pos - (len(cmd) + 1)
parser = commands.lookup_parser(cmd, self.mode)
if parser is not None:
# set 'res' - the result set of matching completionstrings
# depending on the current mode and command
# detect if we are completing optional parameter
arguments_until_now = params[:localpos].split(' ')
all_optionals = True
logging.debug(str(arguments_until_now))
for a in arguments_until_now:
logging.debug(a)
if a and not a.startswith('-'):
all_optionals = False
# complete optional parameter if
# 1. all arguments prior to current position are optional
# 2. the parameter starts with '-' or we are at its beginning
if all_optionals:
myarg = arguments_until_now[-1]
start_myarg = params.rindex(myarg)
beforeme = params[:start_myarg]
# set up local stringlist completer
# and let it complete for given list of options
localcompleter = ArgparseOptionCompleter(parser)
localres = localcompleter.complete(myarg, len(myarg))
res = [(
beforeme + c, p + start_myarg) for (c, p) in localres]
# global
elif cmd == 'search':
res = self._querycompleter.complete(params, localpos)
elif cmd == 'help':
res = self._commandnamecompleter.complete(params, localpos)
elif cmd in ['compose']:
res = self._contactscompleter.complete(params, localpos)
# search
elif self.mode == 'search' and cmd == 'refine':
res = self._querycompleter.complete(params, localpos)
elif self.mode == 'search' and cmd in ['tag', 'retag', 'untag',
'toggletags']:
localcomp = MultipleSelectionCompleter(self._tagcompleter,
separator=',')
res = localcomp.complete(params, localpos)
elif self.mode == 'search' and cmd == 'toggletag':
localcomp = MultipleSelectionCompleter(self._tagcompleter,
separator=' ')
res = localcomp.complete(params, localpos)
# envelope
elif self.mode == 'envelope' and cmd == 'set':
plist = params.split(' ', 1)
if len(plist) == 1: # complete from header keys
localprefix = params
headers = ['Subject', 'To', 'Cc', 'Bcc', 'In-Reply-To',
'From']
localcompleter = StringlistCompleter(headers)
localres = localcompleter.complete(
localprefix, localpos)
res = [(c, p + 6) for (c, p) in localres]
else: # must have 2 elements
header, params = plist
localpos = localpos - (len(header) + 1)
if header.lower() in ['to', 'cc', 'bcc']:
res = self._contactscompleter.complete(params,
localpos)
elif header.lower() == 'from':
res = self._accountscompleter.complete(params,
localpos)
# prepend 'set ' + header and correct position
def f((completed, pos)):
return ('%s %s' % (header, completed),
pos + len(header) + 1)
                        res = [f(r) for r in res]
                        logging.debug(res)
elif self.mode == 'envelope' and cmd == 'unset':
plist = params.split(' ', 1)
if len(plist) == 1: # complete from header keys
localprefix = params
buf = self.currentbuffer
if buf:
if isinstance(buf, EnvelopeBuffer):
available = buf.envelope.headers.keys()
localcompleter = StringlistCompleter(available)
localres = localcompleter.complete(localprefix,
localpos)
res = [(c, p + 6) for (c, p) in localres]
elif self.mode == 'envelope' and cmd == 'attach':
res = self._pathcompleter.complete(params, localpos)
elif self.mode == 'envelope' and cmd in ['sign', 'togglesign']:
res = self._secretkeyscompleter.complete(params, localpos)
elif self.mode == 'envelope' and cmd in ['encrypt',
'rmencrypt',
'toggleencrypt']:
res = self._publickeyscompleter.complete(params, localpos)
# thread
elif self.mode == 'thread' and cmd == 'save':
res = self._pathcompleter.complete(params, localpos)
elif self.mode == 'thread' and cmd in ['fold', 'unfold',
'togglesource',
'toggleheaders']:
res = self._querycompleter.complete(params, localpos)
elif self.mode == 'thread' and cmd in ['tag', 'retag', 'untag',
'toggletags']:
localcomp = MultipleSelectionCompleter(self._tagcompleter,
separator=',')
res = localcomp.complete(params, localpos)
elif cmd == 'move':
directions = ['up', 'down', 'page up', 'page down']
if self.mode == 'thread':
directions += ['first', 'last', 'next', 'previous',
'last reply', 'first reply', 'parent',
'next unfolded', 'previous unfolded',
'next sibling', 'previous sibling']
localcompleter = StringlistCompleter(directions)
res = localcompleter.complete(params, localpos)
# prepend cmd and correct position
res = [('%s %s' % (cmd, t), p + len(cmd) +
1) for (t, p) in res]
# re-insert whitespaces and correct position
wso = whitespaceoffset
res = [(' ' * wso + cmdstr, p + wso) for cmdstr, p in res]
return res
class CommandLineCompleter(Completer):
"""completes command lines: semicolon separated command strings"""
def __init__(self, dbman, mode, currentbuffer=None):
"""
        :param dbman: used to look up available tagstrings
:type dbman: :class:`~alot.db.DBManager`
:param mode: mode identifier
:type mode: str
:param currentbuffer: currently active buffer. If defined, this will be
used to dynamically extract possible completion
strings
:type currentbuffer: :class:`~alot.buffers.Buffer`
"""
self._commandcompleter = CommandCompleter(dbman, mode, currentbuffer)
@staticmethod
def get_context(line, pos):
"""
computes start and end position of substring of line that is the
command string under given position
"""
commands = split_commandline(line) + ['']
i = 0
start = 0
end = len(commands[i])
while pos > end:
i += 1
start = end + 1
end += 1 + len(commands[i])
return start, end
def complete(self, line, pos):
cstart, cend = self.get_context(line, pos)
before = line[:cstart]
after = line[cend:]
cmdstring = line[cstart:cend]
cpos = pos - cstart
res = []
for ccmd, ccpos in self._commandcompleter.complete(cmdstring, cpos):
newtext = before + ccmd + after
newpos = pos + (ccpos - cpos)
res.append((newtext, newpos))
return res
class PathCompleter(Completer):
"""completion for paths"""
def complete(self, original, pos):
if not original:
return [('~/', 2)]
prefix = os.path.expanduser(original[:pos])
def escape(path):
return path.replace('\\', '\\\\').replace(' ', r'\ ')
def deescape(escaped_path):
return escaped_path.replace('\\ ', ' ').replace('\\\\', '\\')
def prep(path):
escaped_path = escape(path)
return escaped_path, len(escaped_path)
return [prep(g) for g in glob.glob(deescape(prefix) + '*')]
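# Illustrative sketch (not part of the original module): path completion
# expands '~' and globs the filesystem, so the concrete results depend on the
# machine this runs on.
def _example_path_completion():
    completer = PathCompleter()
    # an empty prefix suggests the home directory ...
    assert completer.complete('', 0) == [('~/', 2)]
    # ... while a partial path is globbed against the filesystem.
    return completer.complete('/tm', 3)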
class CryptoKeyCompleter(StringlistCompleter):
"""completion for gpg keys"""
def __init__(self, private=False):
"""
:param private: return private keys
:type private: bool
"""
keys = crypto.list_keys(private=private)
resultlist = []
for k in keys:
for s in k.subkeys:
resultlist.append(s.keyid)
for u in k.uids:
resultlist.append(u.email)
StringlistCompleter.__init__(self, resultlist, match_anywhere=True)
| gpl-3.0 | -4,172,301,833,210,416,600 | 39.560144 | 79 | 0.530409 | false |
saurabh6790/frappe | frappe/model/mapper.py | 2 | 7813 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.utils import cstr
from frappe.model import default_fields, table_fields
from six import string_types
@frappe.whitelist()
def make_mapped_doc(method, source_name, selected_children=None, args=None):
'''Returns the mapped document calling the given mapper method.
Sets selected_children as flags for the `get_mapped_doc` method.
Called from `open_mapped_doc` from create_new.js'''
for hook in frappe.get_hooks("override_whitelisted_methods", {}).get(method, []):
# override using the first hook
method = hook
break
method = frappe.get_attr(method)
if method not in frappe.whitelisted:
raise frappe.PermissionError
if selected_children:
selected_children = json.loads(selected_children)
if args:
frappe.flags.args = frappe._dict(json.loads(args))
frappe.flags.selected_children = selected_children or None
return method(source_name)
@frappe.whitelist()
def map_docs(method, source_names, target_doc, args=None):
''' Returns the mapped document calling the given mapper method
with each of the given source docs on the target doc
:param args: Args as string to pass to the mapper method
E.g. args: "{ 'supplier': 'XYZ' }" '''
method = frappe.get_attr(method)
if method not in frappe.whitelisted:
raise frappe.PermissionError
for src in json.loads(source_names):
_args = (src, target_doc, json.loads(args)) if args else (src, target_doc)
target_doc = method(*_args)
return target_doc
def get_mapped_doc(from_doctype, from_docname, table_maps, target_doc=None,
postprocess=None, ignore_permissions=False, ignore_child_tables=False):
apply_strict_user_permissions = frappe.get_system_settings("apply_strict_user_permissions")
# main
if not target_doc:
target_doc = frappe.new_doc(table_maps[from_doctype]["doctype"])
elif isinstance(target_doc, string_types):
target_doc = frappe.get_doc(json.loads(target_doc))
if (not apply_strict_user_permissions
and not ignore_permissions and not target_doc.has_permission("create")):
target_doc.raise_no_permission_to("create")
source_doc = frappe.get_doc(from_doctype, from_docname)
if not ignore_permissions:
if not source_doc.has_permission("read"):
source_doc.raise_no_permission_to("read")
map_doc(source_doc, target_doc, table_maps[source_doc.doctype])
row_exists_for_parentfield = {}
# children
if not ignore_child_tables:
for df in source_doc.meta.get_table_fields():
source_child_doctype = df.options
table_map = table_maps.get(source_child_doctype)
			# if table_map isn't explicitly specified, check whether source and target
			# share the same fieldname and table options and neither of them sets no_copy
if not table_map:
target_df = target_doc.meta.get_field(df.fieldname)
if target_df:
target_child_doctype = target_df.options
if target_df and target_child_doctype==source_child_doctype and not df.no_copy and not target_df.no_copy:
table_map = {
"doctype": target_child_doctype
}
if table_map:
for source_d in source_doc.get(df.fieldname):
if "condition" in table_map:
if not table_map["condition"](source_d):
continue
# if children are selected (checked from UI) for this table type,
# and this record is not in the selected children, then continue
if (frappe.flags.selected_children
and (df.fieldname in frappe.flags.selected_children)
and source_d.name not in frappe.flags.selected_children[df.fieldname]):
continue
target_child_doctype = table_map["doctype"]
target_parentfield = target_doc.get_parentfield_of_doctype(target_child_doctype)
# does row exist for a parentfield?
if target_parentfield not in row_exists_for_parentfield:
row_exists_for_parentfield[target_parentfield] = (True
if target_doc.get(target_parentfield) else False)
if table_map.get("add_if_empty") and \
row_exists_for_parentfield.get(target_parentfield):
continue
if table_map.get("filter") and table_map.get("filter")(source_d):
continue
map_child_doc(source_d, target_doc, table_map, source_doc)
if postprocess:
postprocess(source_doc, target_doc)
target_doc.set_onload("load_after_mapping", True)
if (apply_strict_user_permissions
and not ignore_permissions and not target_doc.has_permission("create")):
target_doc.raise_no_permission_to("create")
return target_doc
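# Illustrative sketch (not part of the original module): a typical mapper
# function built on top of get_mapped_doc. The doctypes, fieldnames and the
# make_target name are made up for this example.
def make_target(source_name, target_doc=None):
	def postprocess(source, target):
		# hook for adjustments after all mapped fields have been copied
		target.flags.ignore_permissions = True
	return get_mapped_doc("Source DocType", source_name, {
		"Source DocType": {
			"doctype": "Target DocType",
			"field_map": {"customer": "customer"},
		},
		"Source Child DocType": {
			"doctype": "Target Child DocType",
			"field_map": [["parent", "source_reference"]],
		},
	}, target_doc, postprocess)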
def map_doc(source_doc, target_doc, table_map, source_parent=None):
if table_map.get("validation"):
for key, condition in table_map["validation"].items():
if condition[0]=="=":
if source_doc.get(key) != condition[1]:
frappe.throw(_("Cannot map because following condition fails: ")
+ key + "=" + cstr(condition[1]))
map_fields(source_doc, target_doc, table_map, source_parent)
if "postprocess" in table_map:
table_map["postprocess"](source_doc, target_doc, source_parent)
def map_fields(source_doc, target_doc, table_map, source_parent):
no_copy_fields = set([d.fieldname for d in source_doc.meta.get("fields") if (d.no_copy==1 or d.fieldtype in table_fields)]
+ [d.fieldname for d in target_doc.meta.get("fields") if (d.no_copy==1 or d.fieldtype in table_fields)]
+ list(default_fields)
+ list(table_map.get("field_no_map", [])))
for df in target_doc.meta.get("fields"):
if df.fieldname not in no_copy_fields:
# map same fields
val = source_doc.get(df.fieldname)
if val not in (None, ""):
target_doc.set(df.fieldname, val)
elif df.fieldtype == "Link":
if not target_doc.get(df.fieldname):
# map link fields having options == source doctype
if df.options == source_doc.doctype:
target_doc.set(df.fieldname, source_doc.name)
elif source_parent and df.options == source_parent.doctype:
target_doc.set(df.fieldname, source_parent.name)
# map other fields
field_map = table_map.get("field_map")
if field_map:
if isinstance(field_map, dict):
for source_key, target_key in field_map.items():
val = source_doc.get(source_key)
if val not in (None, ""):
target_doc.set(target_key, val)
else:
for fmap in field_map:
val = source_doc.get(fmap[0])
if val not in (None, ""):
target_doc.set(fmap[1], val)
# map idx
if source_doc.idx:
target_doc.idx = source_doc.idx
# add fetch
for df in target_doc.meta.get("fields", {"fieldtype": "Link"}):
if target_doc.get(df.fieldname):
map_fetch_fields(target_doc, df, no_copy_fields)
def map_fetch_fields(target_doc, df, no_copy_fields):
linked_doc = None
	# options should be like "link_fieldname.fieldname_in_linked_doc"
for fetch_df in target_doc.meta.get("fields", {"fetch_from": "^{0}.".format(df.fieldname)}):
if not (fetch_df.fieldtype == "Read Only" or fetch_df.read_only):
continue
if ((not target_doc.get(fetch_df.fieldname) or fetch_df.fieldtype == "Read Only")
and fetch_df.fieldname not in no_copy_fields):
source_fieldname = fetch_df.fetch_from.split(".")[1]
if not linked_doc:
try:
linked_doc = frappe.get_doc(df.options, target_doc.get(df.fieldname))
except:
return
val = linked_doc.get(source_fieldname)
if val not in (None, ""):
target_doc.set(fetch_df.fieldname, val)
def map_child_doc(source_d, target_parent, table_map, source_parent=None):
target_child_doctype = table_map["doctype"]
target_parentfield = target_parent.get_parentfield_of_doctype(target_child_doctype)
target_d = frappe.new_doc(target_child_doctype, target_parent, target_parentfield)
map_doc(source_d, target_d, table_map, source_parent)
target_d.idx = None
target_parent.append(target_parentfield, target_d)
return target_d
| mit | 4,925,199,684,571,421,000 | 33.267544 | 159 | 0.707539 | false |
amaozhao/basecms | cms/models/titlemodels.py | 16 | 5249 | # -*- coding: utf-8 -*-
from datetime import timedelta
from cms.constants import PUBLISHER_STATE_DIRTY
from cms.utils.compat.dj import python_2_unicode_compatible
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from cms.models.managers import TitleManager
from cms.models.pagemodel import Page
from cms.utils.helpers import reversion_register
@python_2_unicode_compatible
class Title(models.Model):
language = models.CharField(_("language"), max_length=15, db_index=True)
title = models.CharField(_("title"), max_length=255)
page_title = models.CharField(_("title"), max_length=255, blank=True, null=True,
help_text=_("overwrite the title (html title tag)"))
menu_title = models.CharField(_("title"), max_length=255, blank=True, null=True,
help_text=_("overwrite the title in the menu"))
meta_description = models.TextField(_("description"), max_length=155, blank=True, null=True,
help_text=_("The text displayed in search engines."))
slug = models.SlugField(_("slug"), max_length=255, db_index=True, unique=False)
path = models.CharField(_("Path"), max_length=255, db_index=True)
has_url_overwrite = models.BooleanField(_("has url overwrite"), default=False, db_index=True, editable=False)
redirect = models.CharField(_("redirect"), max_length=255, blank=True, null=True)
page = models.ForeignKey(Page, verbose_name=_("page"), related_name="title_set")
creation_date = models.DateTimeField(_("creation date"), editable=False, default=timezone.now)
# Publisher fields
published = models.BooleanField(_("is published"), blank=True, default=False)
publisher_is_draft = models.BooleanField(default=True, editable=False, db_index=True)
# This is misnamed - the one-to-one relation is populated on both ends
publisher_public = models.OneToOneField('self', related_name='publisher_draft', null=True, editable=False)
publisher_state = models.SmallIntegerField(default=0, editable=False, db_index=True)
objects = TitleManager()
class Meta:
unique_together = (('language', 'page'),)
app_label = 'cms'
def __str__(self):
return u"%s (%s, %s)" % (self.title, self.slug, self.language)
def update_path(self):
# Build path from parent page's path and slug
slug = u'%s' % self.slug
if not self.has_url_overwrite:
self.path = u'%s' % slug
if self.page.parent_id:
parent_page = self.page.parent_id
parent_title = Title.objects.get_title(parent_page, language=self.language, language_fallback=True)
if parent_title:
self.path = u'%s/%s' % (parent_title.path, slug)
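        # Illustrative example (not from the original source): for a page tree
        # with slugs 'products' -> 'shoes', the child title's path becomes
        # u'products/shoes', unless has_url_overwrite is set on it.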
@property
def overwrite_url(self):
"""Return overwritten url, or None
"""
if self.has_url_overwrite:
return self.path
return None
def is_dirty(self):
return self.publisher_state == PUBLISHER_STATE_DIRTY
def save_base(self, *args, **kwargs):
"""Overridden save_base. If an instance is draft, and was changed, mark
it as dirty.
Dirty flag is used for changed nodes identification when publish method
takes place. After current changes are published, state is set back to
PUBLISHER_STATE_DEFAULT (in publish method).
"""
keep_state = getattr(self, '_publisher_keep_state', None)
# Published pages should always have a publication date
# if the page is published we set the publish date if not set yet.
if self.page.publication_date is None and self.published:
self.page.publication_date = timezone.now() - timedelta(seconds=5)
if self.publisher_is_draft and not keep_state and self.is_new_dirty():
self.publisher_state = PUBLISHER_STATE_DIRTY
if keep_state:
delattr(self, '_publisher_keep_state')
ret = super(Title, self).save_base(*args, **kwargs)
return ret
def is_new_dirty(self):
if self.pk:
fields = [
'title', 'page_title', 'menu_title', 'meta_description', 'slug', 'has_url_overwrite', 'redirect'
]
try:
old_title = Title.objects.get(pk=self.pk)
except Title.DoesNotExist:
return True
for field in fields:
old_val = getattr(old_title, field)
new_val = getattr(self, field)
if not old_val == new_val:
return True
return False
return True
class EmptyTitle(object):
    """Empty title object, can be returned from Page.get_title_obj() if required
    title object doesn't exist.
    """
    def __init__(self, language):
        self.language = language
title = ""
slug = ""
path = ""
meta_description = ""
redirect = ""
has_url_overwrite = False
application_urls = ""
menu_title = ""
page_title = ""
published = False
@property
def overwrite_url(self):
return None
reversion_register(Title)
| mit | -4,258,440,278,298,718,000 | 38.171642 | 115 | 0.623357 | false |
elahejalalpour/ELRyu | ryu/lib/stringify.py | 8 | 12829 | #!/usr/bin/env python
#
# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2013 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import collections
import inspect
import six
# Some arguments to __init__ is mungled in order to avoid name conflicts
# with builtin names.
# The standard mangling is to append '_' in order to avoid name clashes
# with reserved keywords.
#
# PEP8:
# Function and method arguments
# If a function argument's name clashes with a reserved keyword,
# it is generally better to append a single trailing underscore
# rather than use an abbreviation or spelling corruption. Thus
# class_ is better than clss. (Perhaps better is to avoid such
# clashes by using a synonym.)
#
# grep __init__ *.py | grep '[^_]_\>' showed that
# 'len', 'property', 'set', 'type'
# A bit more generic way is adopted
try:
# Python 2
import __builtin__
except ImportError:
# Python 3
import builtins as __builtin__
_RESERVED_KEYWORD = dir(__builtin__)
_mapdict = lambda f, d: dict([(k, f(v)) for k, v in d.items()])
_mapdict_key = lambda f, d: dict([(f(k), v) for k, v in d.items()])
_mapdict_kv = lambda f, d: dict([(k, f(k, v)) for k, v in d.items()])
class TypeDescr(object):
pass
class AsciiStringType(TypeDescr):
@staticmethod
def encode(v):
# TODO: AsciiStringType data should probably be stored as
# text_type in class data. This isinstance() check exists
# because OFPDescStats violates this.
if six.PY3 and isinstance(v, six.text_type):
return v
return six.text_type(v, 'ascii')
@staticmethod
def decode(v):
if six.PY3:
return v
return v.encode('ascii')
class Utf8StringType(TypeDescr):
@staticmethod
def encode(v):
return six.text_type(v, 'utf-8')
@staticmethod
def decode(v):
return v.encode('utf-8')
class AsciiStringListType(TypeDescr):
@staticmethod
def encode(v):
return [AsciiStringType.encode(x) for x in v]
@staticmethod
def decode(v):
return [AsciiStringType.decode(x) for x in v]
class NXFlowSpecFieldType(TypeDescr):
# ("field_name", 0) <-> ["field_name", 0]
@staticmethod
def encode(v):
if not isinstance(v, tuple):
return v
field, ofs = v
return [field, ofs]
@staticmethod
def decode(v):
if not isinstance(v, list):
return v
field, ofs = v
return (field, ofs)
_types = {
'ascii': AsciiStringType,
'utf-8': Utf8StringType,
'asciilist': AsciiStringListType,
'nx-flow-spec-field': NXFlowSpecFieldType, # XXX this should not be here
}
class StringifyMixin(object):
_TYPE = {}
"""_TYPE class attribute is used to annotate types of attributes.
This type information is used to find an appropriate conversion for
a JSON style dictionary.
Currently the following types are implemented.
========= =============
    Type      Description
========= =============
ascii US-ASCII
utf-8 UTF-8
asciilist list of ascii
========= =============
Example::
_TYPE = {
'ascii': [
'hw_addr',
],
'utf-8': [
'name',
]
}
"""
_class_prefixes = []
_class_suffixes = []
def stringify_attrs(self):
"""an override point for sub classes"""
return obj_python_attrs(self)
def __str__(self):
# repr() to escape binaries
return self.__class__.__name__ + '(' + \
','.join("%s=%s" % (k, repr(v)) for k, v in
self.stringify_attrs()) + ')'
__repr__ = __str__ # note: str(list) uses __repr__ for elements
@classmethod
def _is_class(cls, dict_):
# we distinguish a dict like OFPSwitchFeatures.ports
# from OFPxxx classes using heuristics.
        # examples of OFP classes:
# {"OFPMatch": { ... }}
# {"MTIPv6SRC": { ... }}
assert isinstance(dict_, dict)
if len(dict_) != 1:
return False
k = list(dict_.keys())[0]
if not isinstance(k, (bytes, six.text_type)):
return False
for p in cls._class_prefixes:
if k.startswith(p):
return True
for p in cls._class_suffixes:
if k.endswith(p):
return True
return False
@classmethod
def _get_type(cls, k):
if hasattr(cls, '_TYPE'):
for t, attrs in cls._TYPE.items():
if k in attrs:
return _types[t]
return None
@classmethod
def _get_encoder(cls, k, encode_string):
t = cls._get_type(k)
if t:
return t.encode
return cls._get_default_encoder(encode_string)
@classmethod
def _encode_value(cls, k, v, encode_string=base64.b64encode):
return cls._get_encoder(k, encode_string)(v)
@classmethod
def _get_default_encoder(cls, encode_string):
def _encode(v):
if isinstance(v, (bytes, six.text_type)):
if isinstance(v, six.text_type):
v = v.encode('utf-8')
json_value = encode_string(v)
if six.PY3:
json_value = json_value.decode('ascii')
elif isinstance(v, list):
json_value = list(map(_encode, v))
elif isinstance(v, dict):
json_value = _mapdict(_encode, v)
# while a python dict key can be any hashable object,
# a JSON object key should be a string.
json_value = _mapdict_key(str, json_value)
assert not cls._is_class(json_value)
else:
try:
json_value = v.to_jsondict()
except:
json_value = v
return json_value
return _encode
def to_jsondict(self, encode_string=base64.b64encode):
"""
This method returns a JSON style dict to describe this object.
The returned dict is compatible with json.dumps() and json.loads().
Suppose ClassName object inherits StringifyMixin.
For an object like the following::
ClassName(Param1=100, Param2=200)
this method would produce::
{ "ClassName": {"Param1": 100, "Param2": 200} }
This method takes the following arguments.
.. tabularcolumns:: |l|L|
============= =====================================================
Argument Description
============= =====================================================
encode_string (Optional) specify how to encode attributes which has
python 'str' type.
The default is base64.
This argument is used only for attributes which don't
have explicit type annotations in _TYPE class attribute.
============= =====================================================
"""
dict_ = {}
encode = lambda k, x: self._encode_value(k, x, encode_string)
for k, v in obj_attrs(self):
dict_[k] = encode(k, v)
return {self.__class__.__name__: dict_}
@classmethod
def cls_from_jsondict_key(cls, k):
# find a class with the given name from our class' module.
import sys
mod = sys.modules[cls.__module__]
return getattr(mod, k)
@classmethod
def obj_from_jsondict(cls, jsondict, **additional_args):
assert len(jsondict) == 1
for k, v in jsondict.items():
obj_cls = cls.cls_from_jsondict_key(k)
return obj_cls.from_jsondict(v, **additional_args)
@classmethod
def _get_decoder(cls, k, decode_string):
t = cls._get_type(k)
if t:
return t.decode
return cls._get_default_decoder(decode_string)
@classmethod
def _decode_value(cls, k, json_value, decode_string=base64.b64decode,
**additional_args):
return cls._get_decoder(k, decode_string)(json_value)
@classmethod
def _get_default_decoder(cls, decode_string):
def _decode(json_value, **additional_args):
if isinstance(json_value, (bytes, six.text_type)):
v = decode_string(json_value)
elif isinstance(json_value, list):
v = list(map(_decode, json_value))
elif isinstance(json_value, dict):
if cls._is_class(json_value):
v = cls.obj_from_jsondict(json_value, **additional_args)
else:
v = _mapdict(_decode, json_value)
# XXXhack
# try to restore integer keys used by
# OFPSwitchFeatures.ports.
try:
v = _mapdict_key(int, v)
except ValueError:
pass
else:
v = json_value
return v
return _decode
@staticmethod
def _restore_args(dict_):
def restore(k):
if k in _RESERVED_KEYWORD:
return k + '_'
return k
return _mapdict_key(restore, dict_)
@classmethod
def from_jsondict(cls, dict_, decode_string=base64.b64decode,
**additional_args):
"""Create an instance from a JSON style dict.
Instantiate this class with parameters specified by the dict.
This method takes the following arguments.
.. tabularcolumns:: |l|L|
=============== =====================================================
        Argument        Description
=============== =====================================================
dict\_ A dictionary which describes the parameters.
For example, {"Param1": 100, "Param2": 200}
decode_string (Optional) specify how to decode strings.
The default is base64.
This argument is used only for attributes which don't
have explicit type annotations in _TYPE class
attribute.
additional_args (Optional) Additional kwargs for constructor.
=============== =====================================================
"""
decode = lambda k, x: cls._decode_value(k, x, decode_string,
**additional_args)
kwargs = cls._restore_args(_mapdict_kv(decode, dict_))
try:
return cls(**dict(kwargs, **additional_args))
except TypeError:
# debug
print("CLS %s" % cls)
print("ARG %s" % dict_)
print("KWARG %s" % kwargs)
raise
@classmethod
def set_classes(cls, registered_dict):
cls._class_prefixes.extend([v.__name__ for v in
registered_dict.values()])
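# Illustrative sketch (not part of the original module): a minimal
# StringifyMixin subclass and a JSON round trip. The class name and its
# attributes are made up.
class _ExamplePoint(StringifyMixin):
    def __init__(self, x=0, y=0):
        super(_ExamplePoint, self).__init__()
        self.x = x
        self.y = y
def _example_round_trip():
    # to_jsondict() produces {"_ExamplePoint": {"x": 1, "y": 2}};
    # from_jsondict() rebuilds an equivalent instance from the inner dict.
    jsondict = _ExamplePoint(1, 2).to_jsondict()
    return _ExamplePoint.from_jsondict(jsondict['_ExamplePoint'])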
def obj_python_attrs(msg_):
"""iterate object attributes for stringify purposes
"""
# a special case for namedtuple which seems widely used in
# ofp parser implementations.
if hasattr(msg_, '_fields'):
for k in msg_._fields:
yield(k, getattr(msg_, k))
return
base = getattr(msg_, '_base_attributes', [])
for k, v in inspect.getmembers(msg_):
if k.startswith('_'):
continue
if callable(v):
continue
if k in base:
continue
if hasattr(msg_.__class__, k):
continue
yield (k, v)
def obj_attrs(msg_):
"""similar to obj_python_attrs() but deals with python reserved keywords
"""
if isinstance(msg_, StringifyMixin):
iter = msg_.stringify_attrs()
else:
# probably called by msg_str_attr
iter = obj_python_attrs(msg_)
for k, v in iter:
if k.endswith('_') and k[:-1] in _RESERVED_KEYWORD:
# XXX currently only StringifyMixin has restoring logic
assert isinstance(msg_, StringifyMixin)
k = k[:-1]
yield (k, v)
| apache-2.0 | -6,690,845,110,445,938,000 | 30.833747 | 79 | 0.536987 | false |
missionpinball/mpf | mpf/commands/__init__.py | 1 | 6303 | """MPF CLI commands."""
import argparse
from importlib import import_module
import os
import sys
from pkg_resources import iter_entry_points
import mpf.core
from mpf._version import version
EXAMPLES_FOLDER = 'examples'
CONFIG_FOLDER = 'config'
class MpfCommandLineParser:
"""Base class for cli commands."""
def __init__(self, args, path):
"""Initialise CLI entry point."""
self.argv = args
self.path = path
self.mpf_path = os.path.abspath(os.path.join(mpf.core.__path__[0],
os.pardir))
def get_machine_path(self, machine_path_hint=None) -> str:
"""Find the full machine path based on the current directory and option hint.
Args:
----
machine_path_hint: Helps MPF locate the machine path. If None,
the 'config' folder in the current working directory is used.
        Returns the full path of the machine folder that was located.
"""
machine_path = None
if machine_path_hint:
if os.path.isdir(os.path.join(self.path, machine_path_hint)):
# If the path hint resolves to a folder, use that as the
# machine folder
machine_path = os.path.join(self.path, machine_path_hint)
else:
# If the folder is invalid, see if we have an examples machine
# folder with that name
example_machine_path = os.path.abspath(os.path.join(
self.mpf_path, os.pardir, EXAMPLES_FOLDER,
machine_path_hint))
if os.path.isdir(example_machine_path):
machine_path = example_machine_path
else:
# no path hint passed.
# Is there a /config folder in our current folder? If so we assume
# the current folder is the machine folder
if os.path.isdir(os.path.join(self.path, CONFIG_FOLDER)):
machine_path = self.path
if machine_path:
return machine_path
if machine_path_hint:
wrong_path = os.path.abspath(machine_path_hint)
else:
wrong_path = os.path.abspath(os.curdir)
raise AssertionError("Error: Could not find machine in folder: '{}'. "
"Either start MPF from within your machine root folder or provide the path after the "
"command.".format(wrong_path))
def parse_args(self):
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description='MPF Command')
parser.add_argument("machine_path", help="Path of the machine folder.",
default=None, nargs='?')
parser.add_argument("--version",
action="version", version=version,
help="Displays the MPF, config file, and BCP "
"version info and exits")
# the problem with parser.add_argument is it will take the first
# positional argument it finds for machine_path and set it to the
# machine path, regardless of what's in front of it. So for example,
# args of "-c step4" will lead to machine_path='step4', but that's not
# right, machine_path should be None. But this is because it doesn't
# know that -c wants to consume the next positional arg.
# So our workaround is we check if there are any argv, and if so, we
# check to see if the first one starts with a dash, meaning it's an
# optional arg and guaranteeing that whatever's after it is NOT our
# machine path, so in that case, we just insert a None as the machine
# path in front of it and everything is cool.
if len(self.argv) > 1 and self.argv[1].startswith('-'):
self.argv.insert(1, None)
args, remaining_args = parser.parse_known_args(self.argv[1:])
machine_path = self.get_machine_path(args.machine_path)
return machine_path, remaining_args
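# Illustrative sketch (not part of the original module): how the machine path
# is resolved. The '/home/user/my_machine' path is made up and must contain a
# 'config' folder for the lookup to succeed.
def _example_machine_path_lookup():
    parser = MpfCommandLineParser(args=['mpf'], path='/home/user/my_machine')
    # with no hint, the current folder is used if it contains 'config';
    # a hint may name a subfolder or one of the bundled example machines.
    return parser.get_machine_path(machine_path_hint=None)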
class CommandLineUtility(MpfCommandLineParser):
"""Default CLI entry point."""
def __init__(self, path=None):
"""Initialise CLI entry point."""
super().__init__(path=path, args=sys.argv[:])
self.external_commands = dict()
self.get_external_commands()
def get_external_commands(self):
"""Entry point to hook more commands.
This is used from mpf mc.
"""
for entry_point in iter_entry_points(group='mpf.command', name=None):
command, function_ref = entry_point.load()()
self.external_commands[command] = function_ref
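        # Illustrative example (not from the original source): another package
        # can hook a command here by exposing a setuptools entry point, e.g.
        #
        #     entry_points={
        #         "mpf.command": ["mycommand = my_pkg.cli:get_command"],
        #     }
        #
        # where get_command() returns a ("mycommand", callable) tuple.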
@classmethod
def check_python_version(cls):
"""Check that we have at least Python 3."""
if sys.version_info[0] != 3:
print("MPF requires Python 3. You have Python {}.{}.{}".format(
sys.version_info[0], sys.version_info[1], sys.version_info[2]
))
sys.exit()
def execute(self):
"""Execute the command that was just set up."""
self.check_python_version()
commands = set()
for file in os.listdir(os.path.join(self.mpf_path, 'commands')):
commands.add(os.path.splitext(file)[0])
command = 'game'
if len(self.argv) > 1:
if self.argv[1] in self.external_commands:
command = self.argv.pop(1)
self.external_commands[command](self.mpf_path,
*self.parse_args())
return
if self.argv[1] in commands:
command = self.argv.pop(1)
_module = import_module('mpf.commands.%s' % command)
if hasattr(_module, "SUBCOMMAND") and _module.SUBCOMMAND:
_module.Command(self.argv, self.path)
else:
machine_path, remaining_args = self.parse_args()
_module.Command(self.mpf_path, machine_path, remaining_args)
def run_from_command_line(args=None):
"""Run a CLI command.
Args:
----
args: Command line arguments that were passed.
"""
del args
path = os.path.abspath(os.path.curdir)
CommandLineUtility(path).execute()
| mit | -4,022,040,592,751,938,600 | 34.610169 | 115 | 0.579724 | false |
liesbethvanherpe/NeuroM | neurom/check/tests/test_neuron_checks.py | 1 | 10546 | # Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from copy import deepcopy
from nose import tools as nt
from neurom import load_neuron
from neurom import check
from neurom.check import neuron_checks as nrn_chk
from neurom.core.dataformat import COLS
from neurom._compat import range
_path = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(_path, '../../../test_data')
SWC_PATH = os.path.join(DATA_PATH, 'swc')
H5V1_PATH = os.path.join(DATA_PATH, 'h5/v1')
def _load_neuron(name):
if name.endswith('.swc'):
path = os.path.join(SWC_PATH, name)
elif name.endswith('.h5'):
path = os.path.join(H5V1_PATH, name)
return name, load_neuron(path)
def _make_monotonic(neuron):
for neurite in neuron.neurites:
for node in neurite.iter_sections():
sec = node.points
if node.parent is not None:
sec[0][COLS.R] = node.parent.points[-1][COLS.R] / 2.
for point_id in range(len(sec) - 1):
sec[point_id + 1][COLS.R] = sec[point_id][COLS.R] / 2.
def _make_flat(neuron):
class Flattenizer(object):
def __call__(self, points):
points = deepcopy(points)
points[:, COLS.Z] = 0.;
return points
return neuron.transform(Flattenizer())
NEURONS = dict([_load_neuron(n) for n in ['Neuron.h5',
'Neuron_2_branch.h5',
'Neuron.swc',
'Neuron_small_radius.swc',
'Neuron_zero_length_sections.swc',
'Neuron_zero_length_segments.swc',
'Neuron_zero_radius.swc',
'Single_apical.swc',
'Single_axon.swc',
'Single_basal.swc',
]])
def _pick(files):
return [NEURONS[f] for f in files]
def test_has_axon_good_data():
files = ['Neuron.swc',
'Neuron_small_radius.swc',
'Single_axon.swc',
'Neuron.h5',
]
for n in _pick(files):
nt.ok_(nrn_chk.has_axon(n))
def test_has_axon_bad_data():
files = ['Single_apical.swc',
'Single_basal.swc',
]
for n in _pick(files):
nt.ok_(not nrn_chk.has_axon(n))
def test_has_apical_dendrite_good_data():
files = ['Neuron.swc',
'Neuron_small_radius.swc',
'Single_apical.swc',
'Neuron.h5',
]
for n in _pick(files):
nt.ok_(nrn_chk.has_apical_dendrite(n))
def test_has_apical_dendrite_bad_data():
files = ['Single_axon.swc',
'Single_basal.swc',
]
for n in _pick(files):
nt.ok_(not nrn_chk.has_apical_dendrite(n))
def test_has_basal_dendrite_good_data():
files = ['Neuron.swc',
'Neuron_small_radius.swc',
'Single_basal.swc',
'Neuron_2_branch.h5',
'Neuron.h5',
]
for n in _pick(files):
nt.ok_(nrn_chk.has_basal_dendrite(n))
def test_has_basal_dendrite_bad_data():
files = ['Single_axon.swc',
'Single_apical.swc',
]
for n in _pick(files):
nt.ok_(not nrn_chk.has_basal_dendrite(n))
def test_has_no_flat_neurites():
_, n = _load_neuron('Neuron.swc')
nt.assert_true(nrn_chk.has_no_flat_neurites(n, 1e-6, method='tolerance'))
nt.assert_true(nrn_chk.has_no_flat_neurites(n, 0.1, method='ratio'))
n = _make_flat(n)
nt.assert_false(nrn_chk.has_no_flat_neurites(n, 1e-6, method='tolerance'))
nt.assert_false(nrn_chk.has_no_flat_neurites(n, 0.1, method='ratio'))
def test_has_all_monotonic_neurites():
_, n = _load_neuron('Neuron.swc')
nt.assert_false(nrn_chk.has_all_monotonic_neurites(n))
_make_monotonic(n)
nt.assert_true(nrn_chk.has_all_monotonic_neurites(n))
def test_nonzero_neurite_radii_good_data():
files = ['Neuron.swc',
'Single_apical.swc',
'Single_basal.swc',
'Single_axon.swc',
'Neuron_2_branch.h5',
]
for n in _pick(files):
ids = nrn_chk.has_all_nonzero_neurite_radii(n)
nt.ok_(len(ids.info) == 0)
def test_has_all_nonzero_neurite_radii_threshold():
nrn = NEURONS['Neuron.swc']
ids = nrn_chk.has_all_nonzero_neurite_radii(nrn)
nt.ok_(ids.status)
ids = nrn_chk.has_all_nonzero_neurite_radii(nrn, threshold=0.25)
nt.assert_equal(len(ids.info), 122)
def test_nonzero_neurite_radii_bad_data():
nrn = NEURONS['Neuron_zero_radius.swc']
ids = nrn_chk.has_all_nonzero_neurite_radii(nrn)
nt.assert_equal(ids.info, [(20, 10), (21, 0),
(22, 0), (22, 6),
(26, 1), (31, 9),
(50, 7)])
def test_nonzero_segment_lengths_good_data():
nrn = NEURONS['Neuron.swc']
ids = nrn_chk.has_all_nonzero_segment_lengths(nrn)
nt.ok_(ids.status)
nt.ok_(len(ids.info) == 0)
def test_nonzero_segment_lengths_bad_data():
files = ['Neuron_zero_length_segments.swc',
'Single_apical.swc',
'Single_basal.swc',
'Single_axon.swc',
]
bad_ids = [[(2, 0), (23, 0), (44, 0), (65, 0)],
[(2, 0)],
[(2, 0)],
[(2, 0)],
[(2, 0)]]
for i, nrn in enumerate(_pick(files)):
ids = nrn_chk.has_all_nonzero_segment_lengths(nrn)
nt.assert_equal(ids.info, bad_ids[i])
def test_nonzero_segment_lengths_threshold():
nrn = NEURONS['Neuron.swc']
ids = nrn_chk.has_all_nonzero_segment_lengths(nrn)
nt.ok_(ids.status)
nt.assert_equal(len(ids.info), 0)
ids = nrn_chk.has_all_nonzero_segment_lengths(nrn, threshold=0.25)
nt.assert_equal(ids.info, [(2, 0), (23, 0), (38, 9), (44, 0),
(54, 7), (62, 2), (65, 0), (72, 4), (78, 6)])
def test_nonzero_section_lengths_good_data():
files = ['Neuron.swc',
'Single_apical.swc',
'Single_basal.swc',
'Single_axon.swc',
]
for i, nrn in enumerate(_pick(files)):
ids = nrn_chk.has_all_nonzero_section_lengths(nrn)
nt.ok_(ids.status)
nt.ok_(len(ids.info) == 0)
def test_nonzero_section_lengths_bad_data():
nrn = NEURONS['Neuron_zero_length_sections.swc']
ids = nrn_chk.has_all_nonzero_section_lengths(nrn)
nt.ok_(not ids.status)
nt.assert_equal(ids.info, [15])
def test_nonzero_section_lengths_threshold():
nrn = NEURONS['Neuron.swc']
ids = nrn_chk.has_all_nonzero_section_lengths(nrn)
nt.ok_(ids.status)
nt.ok_(len(ids.info) == 0)
ids = nrn_chk.has_all_nonzero_section_lengths(nrn, threshold=15.)
nt.ok_(not ids.status)
nt.assert_equal(len(ids.info), 84)
def test_has_nonzero_soma_radius():
nrn = load_neuron(os.path.join(SWC_PATH, 'Neuron.swc'))
nt.assert_true(nrn_chk.has_nonzero_soma_radius(nrn))
def test_has_nonzero_soma_radius_bad_data():
nrn = load_neuron(os.path.join(SWC_PATH, 'Single_basal.swc'))
nt.assert_false(nrn_chk.has_nonzero_soma_radius(nrn).status)
def test_has_no_fat_ends():
_, nrn = _load_neuron('fat_end.swc')
nt.ok_(not nrn_chk.has_no_fat_ends(nrn).status)
# if we only use point, there isn't a 'fat end'
# since if the last point is 'x': x < 2*mean([x])
nt.ok_(nrn_chk.has_no_fat_ends(nrn, final_point_count=1).status)
# if the multiple of the mean is large, the end won't be fat
nt.ok_(nrn_chk.has_no_fat_ends(nrn, multiple_of_mean=10).status)
_, nrn = _load_neuron('Single_basal.swc')
nt.ok_(nrn_chk.has_no_fat_ends(nrn).status)
def test_has_nonzero_soma_radius_threshold():
class Dummy(object):
pass
nrn = Dummy()
nrn.soma = Dummy()
nrn.soma.radius = 1.5
nt.assert_true(nrn_chk.has_nonzero_soma_radius(nrn))
nt.assert_true(nrn_chk.has_nonzero_soma_radius(nrn, 0.25))
nt.assert_true(nrn_chk.has_nonzero_soma_radius(nrn, 0.75))
nt.assert_true(nrn_chk.has_nonzero_soma_radius(nrn, 1.25))
nt.assert_true(nrn_chk.has_nonzero_soma_radius(nrn, 1.499))
nt.assert_false(nrn_chk.has_nonzero_soma_radius(nrn, 1.5))
nt.assert_false(nrn_chk.has_nonzero_soma_radius(nrn, 1.75))
nt.assert_false(nrn_chk.has_nonzero_soma_radius(nrn, 2.5))
def test_has_no_jumps():
_, nrn = _load_neuron('z_jump.swc')
nt.ok_(not nrn_chk.has_no_jumps(nrn).status)
nt.ok_(nrn_chk.has_no_jumps(nrn, 100).status)
nt.ok_(nrn_chk.has_no_jumps(nrn, 100, axis='x').status)
def test__bool__():
c = check.CheckResult(status=True)
nt.ok_(c.__nonzero__())
nt.eq_(c.__bool__(), c.__nonzero__())
| bsd-3-clause | 6,472,826,863,199,616,000 | 30.57485 | 86 | 0.5934 | false |
joshblum/django-with-audit | django/contrib/gis/geos/prototypes/io.py | 92 | 8468 | import threading
from ctypes import byref, c_char_p, c_int, c_char, c_size_t, Structure, POINTER
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR
from django.contrib.gis.geos.prototypes.errcheck import check_geom, check_string, check_sized_string
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure): pass
class WKTWriter_st(Structure): pass
class WKBReader_st(Structure): pass
class WKBWriter_st(Structure): pass
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
WKB_WRITE_PTR = POINTER(WKBReader_st)
### WKTReader routines ###
wkt_reader_create = GEOSFunc('GEOSWKTReader_create')
wkt_reader_create.restype = WKT_READ_PTR
wkt_reader_destroy = GEOSFunc('GEOSWKTReader_destroy')
wkt_reader_destroy.argtypes = [WKT_READ_PTR]
wkt_reader_read = GEOSFunc('GEOSWKTReader_read')
wkt_reader_read.argtypes = [WKT_READ_PTR, c_char_p]
wkt_reader_read.restype = GEOM_PTR
wkt_reader_read.errcheck = check_geom
### WKTWriter routines ###
wkt_writer_create = GEOSFunc('GEOSWKTWriter_create')
wkt_writer_create.restype = WKT_WRITE_PTR
wkt_writer_destroy = GEOSFunc('GEOSWKTWriter_destroy')
wkt_writer_destroy.argtypes = [WKT_WRITE_PTR]
wkt_writer_write = GEOSFunc('GEOSWKTWriter_write')
wkt_writer_write.argtypes = [WKT_WRITE_PTR, GEOM_PTR]
wkt_writer_write.restype = geos_char_p
wkt_writer_write.errcheck = check_string
### WKBReader routines ###
wkb_reader_create = GEOSFunc('GEOSWKBReader_create')
wkb_reader_create.restype = WKB_READ_PTR
wkb_reader_destroy = GEOSFunc('GEOSWKBReader_destroy')
wkb_reader_destroy.argtypes = [WKB_READ_PTR]
def wkb_read_func(func):
# Although the function definitions take `const unsigned char *`
# as their parameter, we use c_char_p here so the function may
# take Python strings directly as parameters. Inside Python there
    # is no difference between signed and unsigned characters, so
# it is not a problem.
func.argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
func.restype = GEOM_PTR
func.errcheck = check_geom
return func
wkb_reader_read = wkb_read_func(GEOSFunc('GEOSWKBReader_read'))
wkb_reader_read_hex = wkb_read_func(GEOSFunc('GEOSWKBReader_readHEX'))
### WKBWriter routines ###
wkb_writer_create = GEOSFunc('GEOSWKBWriter_create')
wkb_writer_create.restype = WKB_WRITE_PTR
wkb_writer_destroy = GEOSFunc('GEOSWKBWriter_destroy')
wkb_writer_destroy.argtypes = [WKB_WRITE_PTR]
# WKB Writing prototypes.
def wkb_write_func(func):
func.argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
func.restype = c_uchar_p
func.errcheck = check_sized_string
return func
wkb_writer_write = wkb_write_func(GEOSFunc('GEOSWKBWriter_write'))
wkb_writer_write_hex = wkb_write_func(GEOSFunc('GEOSWKBWriter_writeHEX'))
# WKBWriter property getter/setter prototypes.
def wkb_writer_get(func, restype=c_int):
func.argtypes = [WKB_WRITE_PTR]
func.restype = restype
return func
def wkb_writer_set(func, argtype=c_int):
func.argtypes = [WKB_WRITE_PTR, argtype]
return func
wkb_writer_get_byteorder = wkb_writer_get(GEOSFunc('GEOSWKBWriter_getByteOrder'))
wkb_writer_set_byteorder = wkb_writer_set(GEOSFunc('GEOSWKBWriter_setByteOrder'))
wkb_writer_get_outdim = wkb_writer_get(GEOSFunc('GEOSWKBWriter_getOutputDimension'))
wkb_writer_set_outdim = wkb_writer_set(GEOSFunc('GEOSWKBWriter_setOutputDimension'))
wkb_writer_get_include_srid = wkb_writer_get(GEOSFunc('GEOSWKBWriter_getIncludeSRID'), restype=c_char)
wkb_writer_set_include_srid = wkb_writer_set(GEOSFunc('GEOSWKBWriter_setIncludeSRID'), argtype=c_char)
### Base I/O Class ###
class IOBase(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self._constructor()
def __del__(self):
# Cleaning up with the appropriate destructor.
if self._ptr: self._destructor(self._ptr)
### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
_constructor = wkt_reader_create
_destructor = wkt_reader_destroy
ptr_type = WKT_READ_PTR
def read(self, wkt):
if not isinstance(wkt, basestring): raise TypeError
return wkt_reader_read(self.ptr, wkt)
class _WKBReader(IOBase):
_constructor = wkb_reader_create
_destructor = wkb_reader_destroy
ptr_type = WKB_READ_PTR
def read(self, wkb):
"Returns a _pointer_ to C GEOS Geometry object from the given WKB."
if isinstance(wkb, buffer):
wkb_s = str(wkb)
return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, basestring):
return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
else:
raise TypeError
### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
_constructor = wkt_writer_create
_destructor = wkt_writer_destroy
ptr_type = WKT_WRITE_PTR
def write(self, geom):
"Returns the WKT representation of the given geometry."
return wkt_writer_write(self.ptr, geom.ptr)
class WKBWriter(IOBase):
_constructor = wkb_writer_create
_destructor = wkb_writer_destroy
ptr_type = WKB_WRITE_PTR
def write(self, geom):
"Returns the WKB representation of the given geometry."
return buffer(wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t())))
def write_hex(self, geom):
"Returns the HEXEWKB representation of the given geometry."
return wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if not order in (0, 1): raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
def _get_outdim(self):
return wkb_writer_get_outdim(self.ptr)
def _set_outdim(self, new_dim):
if not new_dim in (2, 3): raise ValueError('WKB output dimension must be 2 or 3')
wkb_writer_set_outdim(self.ptr, new_dim)
outdim = property(_get_outdim, _set_outdim)
# Property for getting/setting the include srid flag.
def _get_include_srid(self):
return bool(ord(wkb_writer_get_include_srid(self.ptr)))
def _set_include_srid(self, include):
if bool(include): flag = chr(1)
else: flag = chr(0)
wkb_writer_set_include_srid(self.ptr, flag)
srid = property(_get_include_srid, _set_include_srid)
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
wkt_r = None
wkt_w = None
wkb_r = None
wkb_w = None
ewkb_w = None
ewkb_w3d = None
thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
if not thread_context.wkt_r:
thread_context.wkt_r = _WKTReader()
return thread_context.wkt_r
def wkt_w():
if not thread_context.wkt_w:
thread_context.wkt_w = WKTWriter()
return thread_context.wkt_w
def wkb_r():
if not thread_context.wkb_r:
thread_context.wkb_r = _WKBReader()
return thread_context.wkb_r
def wkb_w():
if not thread_context.wkb_w:
thread_context.wkb_w = WKBWriter()
return thread_context.wkb_w
def ewkb_w():
if not thread_context.ewkb_w:
thread_context.ewkb_w = WKBWriter()
thread_context.ewkb_w.srid = True
return thread_context.ewkb_w
def ewkb_w3d():
if not thread_context.ewkb_w3d:
thread_context.ewkb_w3d = WKBWriter()
thread_context.ewkb_w3d.srid = True
thread_context.ewkb_w3d.outdim = 3
return thread_context.ewkb_w3d
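# A minimal usage sketch of the thread-local accessors above, assuming `geom`
# is an existing GEOSGeometry:
#   wkt = wkt_w().write(geom)            # WKT string
#   wkb = wkb_w().write(geom)            # WKB buffer
#   hexewkb = ewkb_w().write_hex(geom)   # HEXEWKB with the SRID included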
| bsd-3-clause | -1,062,297,409,962,977,000 | 33.991736 | 117 | 0.702527 | false |
scottpurdy/nupic.core | bindings/py/src/nupic/bindings/tools/cyclical_serialization_perf.py | 7 | 4260 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2017, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Capnp serialization performance test that involves a network that contains
a simple PyRegion which in turn contains an extension-based Random instance.
"""
import json
import time
# NOTE need to import capnp first to activate the magic necessary for
# NetworkProto_capnp, etc.
import capnp
from nupic.proto.NetworkProto_capnp import NetworkProto
import nupic.bindings.engine_internal as engine
from nupic.bindings.tools.serialization_test_py_region import \
SerializationTestPyRegion
_SERIALIZATION_LOOPS = 100000
_DESERIALIZATION_LOOPS = 100000
# Capnp reader traveral limit (see capnp::ReaderOptions)
_TRAVERSAL_LIMIT_IN_WORDS = 1 << 63
# Capnp reader nesting limit (see capnp::ReaderOptions)
_NESTING_LIMIT = 1 << 31
# Empirically-derived value of maximum deserialization calls on a single reader
# instance for our network to avoid hitting the capnp kj exception
# "Exceeded message traversal limit". (see capnp::ReaderOptions)
_MAX_DESERIALIZATION_LOOPS_PER_READER = 100000
def _runTest():
net = engine.Network()
net.addRegion(SerializationTestPyRegion.__name__,
"py." + SerializationTestPyRegion.__name__,
json.dumps({
"dataWidth": 128,
"randomSeed": 99,
}))
# Measure serialization
startSerializationTime = time.time()
for i in xrange(_SERIALIZATION_LOOPS):
# NOTE pycapnp's builder.from_dict (used in nupic.bindings) leaks
# memory if called on the same builder more than once, so we construct a
# fresh builder here
builderProto = NetworkProto.new_message()
net.write(builderProto)
elapsedSerializationTime = time.time() - startSerializationTime
builderBytes = builderProto.to_bytes()
# Measure deserialization
startDeserializationTime = time.time()
deserializationCount = 0
while deserializationCount < _DESERIALIZATION_LOOPS:
    # NOTE: periodically create a new reader to avoid "Exceeded message traversal
# limit" error
readerProto = NetworkProto.from_bytes(
builderBytes,
traversal_limit_in_words=_TRAVERSAL_LIMIT_IN_WORDS,
nesting_limit=_NESTING_LIMIT)
numReads = min(_DESERIALIZATION_LOOPS - deserializationCount,
_MAX_DESERIALIZATION_LOOPS_PER_READER)
for _ in xrange(numReads):
engine.Network.read(readerProto)
deserializationCount += numReads
elapsedDeserializationTime = time.time() - startDeserializationTime
# Print report
print _SERIALIZATION_LOOPS, "Serialization loops in", \
elapsedSerializationTime, "seconds."
print "\t", elapsedSerializationTime/_SERIALIZATION_LOOPS, "seconds per loop."
print deserializationCount, "Deserialization loops in", \
elapsedDeserializationTime, "seconds."
print "\t", elapsedDeserializationTime/deserializationCount, "seconds per loop."
def main():
"""Measure capnp serialization performance of a network containing a simple
python region that in-turn contains a Random instance.
"""
engine.Network.registerPyRegion(__name__,
SerializationTestPyRegion.__name__)
try:
_runTest()
finally:
engine.Network.unregisterPyRegion(SerializationTestPyRegion.__name__)
if __name__ == "__main__":
main()
| agpl-3.0 | 8,285,896,536,346,109,000 | 32.809524 | 82 | 0.702817 | false |
schollz/extract_recipe | get_freebase_num/probeFreebase.py | 1 | 1456 | import json
import urllib
import sqlite3 as lite
from unidecode import unidecode
'''
last_ndb_no = '10909'
start = False
con = lite.connect('db')
with con:
cur = con.cursor()
cur.execute('select ndb_no,shrt_desc from food_des')
rows = cur.fetchall()
for row in rows:
ndb_no = row[0]
print ndb_no
if start:
api_key = open(".freebase_api_key").read()
service_url = 'https://www.googleapis.com/freebase/v1/mqlread'
query = [{"name": [],"/food/food/usda_id": ndb_no}]
params = {
'query': json.dumps(query),
'key': api_key
}
url = service_url + '?' + urllib.urlencode(params)
urlOpen = urllib.urlopen(url)
response = json.loads(urlOpen.read())
if len(response['result'])>0:
for planet in response['result']:
print row[1] + unidecode(planet['name'][0])
with open('ndb_no_freebase.txt','a') as f:
f.write(ndb_no + "|" + unidecode(planet['name'][0]) + "\n")
if last_ndb_no in ndb_no:
start = True
'''
# Alter the database
con = lite.connect('db')
with con:
cur = con.cursor()
print "alter table food_des add column com_desc varchar(60);"
with open('ndb_no_freebase.txt','rb') as f:
for line in f:
(ndb_no,com_desc) = line.split('|')
com_desc = com_desc.strip()
command = 'update food_des set com_desc="%s" where ndb_no="%s";'%(com_desc,ndb_no)
print command
cur.execute(command)
| apache-2.0 | -4,355,018,759,415,273,000 | 28.714286 | 85 | 0.594093 | false |
aprefontaine/TMScheduler | tests/regressiontests/builtin_server/tests.py | 14 | 1716 | from unittest import TestCase
from StringIO import StringIO
from django.core.servers.basehttp import ServerHandler
#
# Tests for #9659: wsgi.file_wrapper in the builtin server.
# We need to mock a couple of handlers and keep track of what
# gets called when using a couple kinds of WSGI apps.
#
class DummyHandler(object):
def log_request(*args, **kwargs):
pass
class FileWrapperHandler(ServerHandler):
def __init__(self, *args, **kwargs):
ServerHandler.__init__(self, *args, **kwargs)
self.request_handler = DummyHandler()
self._used_sendfile = False
def sendfile(self):
self._used_sendfile = True
return True
def wsgi_app(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
return ['Hello World!']
def wsgi_app_file_wrapper(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
return environ['wsgi.file_wrapper'](StringIO('foo'))
class WSGIFileWrapperTests(TestCase):
"""
    Test that the wsgi.file_wrapper works for the builtin server.
"""
def test_file_wrapper_uses_sendfile(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
err = StringIO()
handler = FileWrapperHandler(None, StringIO(), err, env)
handler.run(wsgi_app_file_wrapper)
self.assert_(handler._used_sendfile)
def test_file_wrapper_no_sendfile(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
err = StringIO()
handler = FileWrapperHandler(None, StringIO(), err, env)
handler.run(wsgi_app)
self.failIf(handler._used_sendfile)
self.assertEqual(handler.stdout.getvalue().splitlines()[-1],'Hello World!')
| bsd-3-clause | -6,528,496,780,133,795,000 | 32.647059 | 83 | 0.661422 | false |
Melowkid/Talos | talos.py | 1 | 44447 | import subprocess
from sets import Set
import sys, os
from proofTree import proofTree
from outputParser import toSNotation
sys.path.append(os.path.join(os.path.dirname(__file__), '../', 'DCEC_Library'))
from DCEC_Library.DCECContainer import DCECContainer
class spassContainer():
directory = os.path.dirname(__file__)
DCECVersion = ""
input = ""
errors = ""
output = ""
result = None
axioms = dict([])
discoveries = []
sorts = []
conjecture = None
proof=None
simultaneousRules = dict([
("DCEC_RULE_1",("formula(forall([Moment(z),Agent(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(P3AgentMomentBoolean(y,z,x),K3AgentMomentBoolean(y,z,x))))),DCEC_RULE_1).",["C2MomentBoolean","Implies2BooleanBoolean","P3AgentMomentBoolean","K3AgentMomentBoolean"])),
("DCEC_RULE_10",("formula(forall([Moment(z),Boolean(y),Boolean(x),Boolean(w)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Implies2BooleanBoolean(And2BooleanBoolean(y,x),w),Implies2BooleanBoolean(y,Implies2BooleanBoolean(x,w)))))),DCEC_RULE_10).",["C2MomentBoolean","Implies2BooleanBoolean","And2BooleanBoolean"])),
("DCEC_RULE_11A",("formula(forall([Agent(z),Moment(y),Boolean(x),Boolean(w)],implies(and(isValid(B3AgentMomentBoolean(z,y,Implies2BooleanBoolean(x,w))),isValid(B3AgentMomentBoolean(z,y,x))),isValid(B3AgentMomentBoolean(z,y,w)))),DCEC_RULE_11A).",["B3AgentMomentBoolean","Implies2BooleanBoolean"])),
("DCEC_RULE_11B",("formula(forall([Agent(z),Moment(y),Boolean(x),Boolean(w)],implies(and(isValid(B3AgentMomentBoolean(z,y,x)),isValid(B3AgentMomentBoolean(z,y,w))),isValid(B3AgentMomentBoolean(z,y,And2BooleanBoolean(x,w))))),DCEC_RULE_11B).",["B3AgentMomentBoolean","And2BooleanBoolean"])),
("DCEC_RULE_12",("formula(forall([Agent(z),Agent(x),Moment(y),Boolean(w)],implies(isValid(S4AgentAgentMomentBoolean(z,x,y,w)),isValid(B3AgentMomentBoolean(x,y,B3AgentMomentBoolean(z,y,w))))),DCEC_RULE_12).",["S4AgentAgentMomentBoolean","B3AgentMomentBoolean"])),
("DCEC_RULE_13",("formula(forall([Agent(z),Moment(y),ActionType(x)],implies(isValid(I3AgentMomentBoolean(z,y,Happens2EventMoment(Action2AgentActionType(Self1Agent(z),x),y))),isValid(P3AgentMomentBoolean(z,y,Happens2EventMoment(Action2AgentActionType(Self1Agent(z),x),y))))),DCEC_RULE_13).",["I3AgentMomentBoolean","Happens2EventMoment","Action2AgentActionType","Self1Agent","P3AgentMomentBoolean"])),
("DCEC_RULE_14",("formula(forall([Agent(z),Moment(y),Boolean(x),ActionType(w)],implies(and(isValid(B3AgentMomentBoolean(z,y,x)),isValid(B3AgentMomentBoolean(z,y,O4AgentMomentBooleanBoolean(Self1Agent(z),y,x,Happens2EventMoment(Action2AgentActionType(Self1Agent(z),w),y)))),isValid(O4AgentMomentBooleanBoolean(z,y,x,Happens2EventMoment(Action2AgentActionType(Self1Agent(z),w),y)))),isValid(K3AgentMomentBoolean(z,y,I3AgentMomentBoolean(Self1Agent(z),y,Happens2EventMoment(Action2AgentActionType(Self1Agent(z),w),y)))))),DCEC_RULE_14).",["B3AgentMomentBoolean","O4AgentMomentBooleanBoolean","Happens2EventMoment","Action2AgentActionType","Self1Agent","K3AgentMomentBoolean","I3AgentMomentBoolean"])),
("DCEC_RULE_15",("formula(forall([Boolean(z),Boolean(y),Agent(x),Moment(w),Boolean(v)],implies(isValid(Iff2BooleanBoolean(z,y)),isValid(Iff2BooleanBoolean(O4AgentMomentBooleanBoolean(x,w,z,v),O4AgentMomentBooleanBoolean(x,w,y,v))))),DCEC_RULE_15).",["Iff2BooleanBoolean","O4AgentMomentBooleanBoolean"])),
("DCEC_RULE_2",("formula(forall([Moment(z),Agent(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(K3AgentMomentBoolean(y,z,x),B3AgentMomentBoolean(y,z,x))))),DCEC_RULE_2).",["C2MomentBoolean","Implies2BooleanBoolean","B3AgentMomentBoolean"])),
("DCEC_RULE_3",("formula(forall([Moment(z),Boolean(y),Agent(x),Agent(w),Agent(v)],implies(isValid(C2MomentBoolean(z,y)),isValid(K3AgentMomentBoolean(x,z,K3AgentMomentBoolean(w,z,K3AgentMomentBoolean(v,z,y)))))),DCEC_RULE_3).",["C2MomentBoolean","K3AgentMomentBoolean"])),
("DCEC_RULE_4",("formula(forall([Agent(z),Moment(y),Boolean(x)],implies(isValid(K3AgentMomentBoolean(z,y,x)),isValid(x))),DCEC_RULE_4).",["K3AgentMomentBoolean"])),
("DCEC_RULE_5",("formula(forall([Moment(z),Agent(y),Boolean(x),Boolean(w)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(K3AgentMomentBoolean(y,z,Implies2BooleanBoolean(x,w)),Implies2BooleanBoolean(K3AgentMomentBoolean(y,z,x),K3AgentMomentBoolean(y,z,w)))))),DCEC_RULE_5).",["C2MomentBoolean","Implies2BooleanBoolean","K3AgentMomentBoolean"])),
("DCEC_RULE_6",("formula(forall([Moment(z),Agent(y),Boolean(x),Boolean(w)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(B3AgentMomentBoolean(y,z,Implies2BooleanBoolean(x,w)),Implies2BooleanBoolean(B3AgentMomentBoolean(y,z,x),B3AgentMomentBoolean(y,z,w)))))),DCEC_RULE_6).",["C2MomentBoolean","Implies2BooleanBoolean","B3AgentMomentBoolean"])),
("DCEC_RULE_7",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(C2MomentBoolean(z,Implies2BooleanBoolean(y,x)),Implies2BooleanBoolean(C2MomentBoolean(z,y),C2MomentBoolean(z,x)))))),DCEC_RULE_7).",["C2MomentBoolean","Implies2BooleanBoolean"])),
("DCEC_RULE_9",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Iff2BooleanBoolean(y,x),Implies2BooleanBoolean(Not1Boolean(y),Not1Boolean(x)))))),DCEC_RULE_9).",["C2MomentBoolean","Implies2BooleanBoolean","Iff2BooleanBoolean","Not1Boolean"])),
("MODUS_PONENS",("formula(forall([Boolean(z),Boolean(y)],implies(and(isValid(z),isValid(Implies2BooleanBoolean(z,y))),isValid(y))),MODUS_PONENS).",["Implies2BooleanBoolean"])),
])
temporalRules = dict([
("DCEC_RULE_1",("formula(forall([Moment(z),Agent(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(P3AgentMomentBoolean(y,z,x),K3AgentMomentBoolean(y,z,x))))),DCEC_RULE_1).",["C2MomentBoolean","Implies2BooleanBoolean","P3AgentMomentBoolean","K3AgentMomentBoolean"])),
("DCEC_RULE_2",("formula(forall([Moment(z),Agent(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(K3AgentMomentBoolean(y,z,x),B3AgentMomentBoolean(y,z,x))))),DCEC_RULE_2).",["C2MomentBoolean","Implies2BooleanBoolean","K3AgentMomentBoolean","B3AgentMomentBoolean"])),
("DCEC_RULE_3",("formula(forall([Moment(z),Moment(y),Moment(x),Moment(w),Boolean(v),Agent(u),Agent(t),Agent(s)],implies(and(isValid(Lessorequal2MomentMoment(z,y)),isValid(Lessorequal2MomentMoment(y,x)),isValid(Lessorequal2MomentMoment(x,w)),isValid(C2MomentBoolean(z,v))),isValid(K3AgentMomentBoolean(u,y,K3AgentMomentBoolean(t,x,K3AgentMomentBoolean(s,w,v)))))),DCEC_RULE_3).",["Lessorequal2MomentMoment","C2MomentBoolean","K3AgentMomentBoolean"])),
("DCEC_RULE_4",("formula(forall([Agent(z),Moment(y),Boolean(x)],implies(isValid(K3AgentMomentBoolean(z,y,x)),isValid(x))),DCEC_RULE_4).",["K3AgentMomentBoolean"])),
("DCEC_RULE_5",("formula(forall([Moment(z),Agent(y),Boolean(x),Boolean(w),Moment(v),Moment(u),Moment(t)],implies(and(isValid(Lessorequal2MomentMoment(v,t)),isValid(Lessorequal2MomentMoment(u,t))),isValid(C2MomentBoolean(z,Implies2BooleanBoolean(K3AgentMomentBoolean(y,v,Implies2BooleanBoolean(x,w)),Implies2BooleanBoolean(K3AgentMomentBoolean(y,u,x),K3AgentMomentBoolean(y,t,w))))))),DCEC_RULE_5).",["Lessorequal2MomentMoment","C2MomentBoolean","Implies2BooleanBoolean","K3AgentMomentBoolean"])),
("DCEC_RULE_6",("formula(forall([Moment(z),Agent(y),Boolean(x),Boolean(w),Moment(v),Moment(u),Moment(t)],implies(and(isValid(Lessorequal2MomentMoment(v,t)),isValid(Lessorequal2MomentMoment(u,t))),isValid(C2MomentBoolean(z,Implies2BooleanBoolean(B3AgentMomentBoolean(y,v,Implies2BooleanBoolean(x,w)),Implies2BooleanBoolean(B3AgentMomentBoolean(y,u,x),B3AgentMomentBoolean(y,t,w))))))),DCEC_RULE_6).",["Lessorequal2MomentMoment","C2MomentBoolean","Implies2BooleanBoolean","B3AgentMomentBoolean"])),
("DCEC_RULE_7",("formula(forall([Moment(z),Boolean(y),Boolean(x),Moment(w),Moment(v),Moment(u)],implies(and(isValid(Lessorequal2MomentMoment(w,u)),isValid(Lessorequal2MomentMoment(v,u))),isValid(C2MomentBoolean(z,Implies2BooleanBoolean(C2MomentBoolean(w,Implies2BooleanBoolean(y,x)),Implies2BooleanBoolean(C2MomentBoolean(v,y),C2MomentBoolean(u,x))))))),DCEC_RULE_7).",["Lessorequal2MomentMoment","C2MomentBoolean","Implies2BooleanBoolean"])),
("DCEC_RULE_9",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Iff2BooleanBoolean(y,x),Implies2BooleanBoolean(Not1Boolean(y),Not1Boolean(x)))))),DCEC_RULE_9).",["C2MomentBoolean","Implies2BooleanBoolean","Iff2BooleanBoolean","Not1Boolean"])),
("DCEC_RULE_10",("formula(forall([Moment(z),Boolean(y),Boolean(x),Boolean(w)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Implies2BooleanBoolean(And2BooleanBoolean(y,x),w),Implies2BooleanBoolean(y,Implies2BooleanBoolean(x,w)))))),DCEC_RULE_10).",["C2MomentBoolean","Implies2BooleanBoolean","And2BooleanBoolean"])),
("DCEC_RULE_11A",("formula(forall([Agent(z),Moment(y),Boolean(x),Boolean(w)],implies(and(isValid(B3AgentMomentBoolean(z,y,Implies2BooleanBoolean(x,w))),isValid(B3AgentMomentBoolean(z,y,x))),isValid(B3AgentMomentBoolean(z,y,w)))),DCEC_RULE_11A).",["B3AgentMomentBoolean","Implies2BooleanBoolean"])),
("DCEC_RULE_11B",("formula(forall([Agent(z),Moment(y),Boolean(x),Boolean(w)],implies(and(isValid(B3AgentMomentBoolean(z,y,x)),isValid(B3AgentMomentBoolean(z,y,w))),isValid(B3AgentMomentBoolean(z,y,And2BooleanBoolean(x,w))))),DCEC_RULE_11B).",["B3AgentMomentBoolean","And2BooleanBoolean"])),
("DCEC_RULE_12",("formula(forall([Agent(z),Agent(x),Moment(y),Boolean(w)],implies(isValid(S4AgentAgentMomentBoolean(z,x,y,w)),isValid(B3AgentMomentBoolean(x,y,B3AgentMomentBoolean(z,y,w))))),DCEC_RULE_12).",["S4AgentAgentMomentBoolean","B3AgentMomentBoolean"])),
("DCEC_RULE_13",("formula(forall([Agent(z),Moment(y),ActionType(x),Moment(w)],implies(isValid(I3AgentMomentBoolean(z,y,Happens2EventMoment(Action2AgentActionType(Self1Agent(z),x),w))),isValid(P3AgentMomentBoolean(z,y,Happens2EventMoment(Action2AgentActionType(Self1Agent(z),x),w))))),DCEC_RULE_13).",["I3AgentMomentBoolean","Happens2EventMoment","Action2AgentActionType","Self1Agent","P3AgentMomentBoolean"])),
("DCEC_RULE_14",("formula(forall([Agent(z),Moment(y),Boolean(x),ActionType(w),Moment(v)],implies(and(isValid(Lessorequal2MomentMoment(y,v)),isValid(B3AgentMomentBoolean(z,y,x)),isValid(B3AgentMomentBoolean(z,y,O4AgentMomentBooleanBoolean(Self1Agent(z),y,x,Happens2EventMoment(Action2AgentActionType(Self1Agent(z),w),v)))),isValid(O4AgentMomentBooleanBoolean(z,y,x,Happens2EventMoment(Action2AgentActionType(Self1Agent(z),w),v)))),isValid(K3AgentMomentBoolean(z,y,I3AgentMomentBoolean(Self1Agent(z),y,Happens2EventMoment(Action2AgentActionType(Self1Agent(z),w),v)))))),DCEC_RULE_14).",["Lessorequal2MomentMoment","B3AgentMomentBoolean","O4AgentMomentBooleanBoolean","Self1Agent","Happens2EventMoment","Action2AgentActionType","I3AgentMomentBoolean"])),
("DCEC_RULE_15",("formula(forall([Boolean(z),Boolean(y),Agent(x),Moment(w),Boolean(v)],implies(isValid(Iff2BooleanBoolean(z,y)),isValid(Iff2BooleanBoolean(O4AgentMomentBooleanBoolean(x,w,z,v),O4AgentMomentBooleanBoolean(x,w,y,v))))),DCEC_RULE_15).",["Iff2BooleanBoolean","O4AgentMomentBooleanBoolean"]))
])
basicLogicRules=dict([
("MODUS_PONENS",("formula(forall([Boolean(z),Boolean(y)],implies(and(isValid(z),isValid(Implies2BooleanBoolean(z,y))),isValid(y))),MODUS_PONENS).",["Implies2BooleanBoolean"])),
("CONJUNCTION_INTRODUCTION",("formula(forall([Boolean(z),Boolean(y)],implies(and(isValid(z),isValid(y)),isValid(And2BooleanBoolean(z,y)))),CONJUNCTION_INTRODUCTION).",["And2BooleanBoolean"])),
("SIMPLIFICATION",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(And2BooleanBoolean(z,y)),isValid(z))),SIMPLIFICATION).",["And2BooleanBoolean"])),
("SIMPLIFICATION1",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(And2BooleanBoolean(z,y)),isValid(y))),SIMPLIFICATION).",["And2BooleanBoolean"])),
("WEAKENING",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(And2BooleanBoolean(z,y)),isValid(Or2BooleanBoolean(z,y)))),WEAKENING).",["And2BooleanBoolean","Or2BooleanBoolean",])),
("DEMORGAN",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(Or2BooleanBoolean(Not1Boolean(z),Not1Boolean(y))),isValid(Not1Boolean(And2BooleanBoolean(z,y))))),DEMORGAN).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean"])),
("DEMORGAN1",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(Not1Boolean(And2BooleanBoolean(z,y))),isValid(Or2BooleanBoolean(Not1Boolean(z),Not1Boolean(y))))),DEMORGAN).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean"])),
("DEMORGAN2",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(And2BooleanBoolean(Not1Boolean(z),Not1Boolean(y))),isValid(Not1Boolean(Or2BooleanBoolean(z,y))))),DEMORGAN).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean"])),
("DEMORGAN3",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(Not1Boolean(Or2BooleanBoolean(z,y))),isValid(And2BooleanBoolean(Not1Boolean(z),Not1Boolean(y))))),DEMORGAN).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean"])),
("DISTRIBUTION",("formula(forall([Boolean(z),Boolean(y),Boolean(x)],implies(isValid(Or2BooleanBoolean(z,And2BooleanBoolean(y,x))),isValid(And2BooleanBoolean(Or2BooleanBoolean(z,y),Or2BooleanBoolean(z,x))))),DISTRIBUTION).",["And2BooleanBoolean","Or2BooleanBoolean"])),
("DISTRIBUTION1",("formula(forall([Boolean(z),Boolean(y),Boolean(x)],implies(isValid(And2BooleanBoolean(Or2BooleanBoolean(z,y),Or2BooleanBoolean(z,x))),isValid(Or2BooleanBoolean(z,And2BooleanBoolean(y,x))))),DISTRIBUTION).",["And2BooleanBoolean","Or2BooleanBoolean"])),
("DISTRIBUTION2",("formula(forall([Boolean(z),Boolean(y),Boolean(x)],implies(isValid(And2BooleanBoolean(z,Or2BooleanBoolean(y,x))),isValid(Or2BooleanBoolean(And2BooleanBoolean(z,y),And2BooleanBoolean(z,x))))),DISTRIBUTION).",["And2BooleanBoolean","Or2BooleanBoolean"])),
("DISTRIBUTION3",("formula(forall([Boolean(z),Boolean(y),Boolean(x)],implies(isValid(Or2BooleanBoolean(And2BooleanBoolean(z,y),And2BooleanBoolean(z,x))),isValid(And2BooleanBoolean(z,Or2BooleanBoolean(y,x))))),DISTRIBUTION).",["And2BooleanBoolean","Or2BooleanBoolean"])),
("COMMUTATIVITY_OF_AND",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(And2BooleanBoolean(z,y)),isValid(And2BooleanBoolean(y,z)))),COMMUTATIVITY_OF_AND).",["And2BooleanBoolean"])),
("COMMUTATIVITY_OF_OR",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(Or2BooleanBoolean(z,y)),isValid(Or2BooleanBoolean(y,z)))),COMMUTATIVITY_OF_OR).",["Or2BooleanBoolean"])),
("COMMUTATIVITY_OF_XOR",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(Xor2BooleanBoolean(z,y)),isValid(Xor2BooleanBoolean(y,z)))),COMMUTATIVITY_OF_XOR).",["Xor2BooleanBoolean"])),
("DEFINITION_OF_XOR",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(Or2BooleanBoolean(And2BooleanBoolean(z,Not1Boolean(y)),And2BooleanBoolean(Not1Boolean(z),y))),isValid(Xor2BooleanBoolean(z,y)))),DEFINITION_OF_XOR).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean","Xor2BooleanBoolean"])),
("DEFINITION_OF_XOR1",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(Xor2BooleanBoolean(z,y)),isValid(Or2BooleanBoolean(And2BooleanBoolean(z,Not1Boolean(y)),And2BooleanBoolean(Not1Boolean(z),y))))),DEFINITION_OF_XOR).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean","Xor2BooleanBoolean"])),
("DEFINITION_OF_XOR2",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(And2BooleanBoolean(Or2BooleanBoolean(z,y),Not1Boolean(And2BooleanBoolean(z,y)))),isValid(Xor2BooleanBoolean(z,y)))),DEFINITION_OF_XOR).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean","Xor2BooleanBoolean"])),
("DEFINITION_OF_XOR3",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(Xor2BooleanBoolean(z,y)),isValid(And2BooleanBoolean(Or2BooleanBoolean(z,y),Not1Boolean(And2BooleanBoolean(z,y)))))),DEFINITION_OF_XOR).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean","Xor2BooleanBoolean"])),
("DISJUNCTIVE_SYLLOGISM",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(Or2BooleanBoolean(Not1Boolean(z),y)),isValid(Implies2BooleanBoolean(z,y)))),DISJUNCTIVE_SYLLOGISM).",["Not1Boolean","Or2BooleanBoolean","Implies2BooleanBoolean"])),
("DISJUNCTIVE_SYLLOGISM1",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(Implies2BooleanBoolean(z,y)),isValid(Or2BooleanBoolean(Not1Boolean(z),y)))),DISJUNCTIVE_SYLLOGISM).",["Not1Boolean","Or2BooleanBoolean","Implies2BooleanBoolean"])),
("CUT_ELIMINATION",("formula(forall([Boolean(z),Boolean(y),Boolean(x)],implies(isValid(And2BooleanBoolean(Implies2BooleanBoolean(z,y),Implies2BooleanBoolean(y,x))),isValid(Implies2BooleanBoolean(z,x)))),CUT_ELIMINATION).",["And2BooleanBoolean","Implies2BooleanBoolean"])),
("DISJUNCTION_ELIMINATION",("formula(forall([Boolean(z),Boolean(y),Boolean(x)],implies(isValid(And2BooleanBoolean(And2BooleanBoolean(Or2BooleanBoolean(z,y),Implies2BooleanBoolean(z,x)),Implies2BooleanBoolean(y,x))),isValid(x))),DISJUNCTION_ELIMINATION).",["And2BooleanBoolean","Or2BooleanBoolean","Implies2BooleanBoolean"])),
("DEFINITION_OF_IFF",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(Iff2BooleanBoolean(z,y)),isValid(And2BooleanBoolean(And2BooleanBoolean(Implies2BooleanBoolean(z,y),Implies2BooleanBoolean(y,z)),And2BooleanBoolean(Implies2BooleanBoolean(Not1Boolean(z),Not1Boolean(y)),Implies2BooleanBoolean(Not1Boolean(y),Not1Boolean(z))))))),DEFINITION_OF_IFF).",["And2BooleanBoolean","Not1Boolean","Implies2BooleanBoolean","Iff2BooleanBoolean"])),
("DEFINITION_OF_IFF1",("formula(forall([Boolean(z),Boolean(y)],implies(isValid(And2BooleanBoolean(And2BooleanBoolean(Implies2BooleanBoolean(z,y),Implies2BooleanBoolean(y,z)),And2BooleanBoolean(Implies2BooleanBoolean(Not1Boolean(z),Not1Boolean(y)),Implies2BooleanBoolean(Not1Boolean(y),Not1Boolean(z))))),isValid(Iff2BooleanBoolean(z,y)))),DEFINITION_OF_IFF).",["And2BooleanBoolean","Not1Boolean","Implies2BooleanBoolean","Iff2BooleanBoolean"])),
])
commonlyKnownLogicRules=dict([
("CMODUS_PONENS",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(y,Implies2BooleanBoolean(y,x)),x)))),CMODUS_PONENS).",["Implies2BooleanBoolean","C2MomentBoolean"])), ("CCONJUNCTION_INTRODUCTION",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(and(y,x),And2BooleanBoolean(y,x)))),CCONJUNCTION_INTRODUCTION).",["And2BooleanBoolean","C2MomentBoolean"])),
("CCONJUNCTION_INTRODUCTION",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(y,x),And2BooleanBoolean(y,x))))),CCONJUNCTION_INTRODUCTION).",["And2BooleanBoolean","C2MomentBoolean"])),
("CSIMPLIFICATION",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(y,x),y)))),CSIMPLIFICATION).",["And2BooleanBoolean","C2MomentBoolean"])),
("CSIMPLIFICATION1",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(y,x),x)))),CSIMPLIFICATION).",["And2BooleanBoolean","C2MomentBoolean"])),
("CWEAKENING",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(y,x),Or2BooleanBoolean(y,x))))),CWEAKENING).",["And2BooleanBoolean","Or2BooleanBoolean",])),
("CDEMORGAN",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Or2BooleanBoolean(Not1Boolean(y),Not1Boolean(x)),Not1Boolean(And2BooleanBoolean(y,x)))))),CDEMORGAN).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean","C2MomentBoolean"])),
("CDEMORGAN1",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Not1Boolean(And2BooleanBoolean(y,x)),Or2BooleanBoolean(Not1Boolean(y),Not1Boolean(x)))))),CDEMORGAN).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean","C2MomentBoolean"])),
("CDEMORGAN2",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(Not1Boolean(y),Not1Boolean(x)),Not1Boolean(Or2BooleanBoolean(y,x)))))),CDEMORGAN).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean","C2MomentBoolean"])),
("CDEMORGAN3",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Not1Boolean(Or2BooleanBoolean(y,x)),And2BooleanBoolean(Not1Boolean(y),Not1Boolean(x)))))),CDEMORGAN).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean","C2MomentBoolean"])),
("CDISTRIBUTION",("formula(forall([Moment(z),Boolean(y),Boolean(x),Boolean(w)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Or2BooleanBoolean(y,And2BooleanBoolean(x,w)),And2BooleanBoolean(Or2BooleanBoolean(y,x),Or2BooleanBoolean(y,w)))))),CDISTRIBUTION).",["And2BooleanBoolean","Or2BooleanBoolean","C2MomentBoolean"])),
("CDISTRIBUTION1",("formula(forall([Moment(z),Boolean(y),Boolean(x),Boolean(w)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(Or2BooleanBoolean(y,x),Or2BooleanBoolean(y,w)),Or2BooleanBoolean(y,And2BooleanBoolean(x,w)))))),CDISTRIBUTION).",["And2BooleanBoolean","Or2BooleanBoolean","C2MomentBoolean"])),
("CDISTRIBUTION2",("formula(forall([Moment(z),Boolean(y),Boolean(x),Boolean(w)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(y,Or2BooleanBoolean(x,w)),Or2BooleanBoolean(And2BooleanBoolean(y,x),And2BooleanBoolean(y,w)))))),CDISTRIBUTION).",["And2BooleanBoolean","Or2BooleanBoolean","C2MomentBoolean"])),
("CDISTRIBUTION3",("formula(forall([Moment(z),Boolean(y),Boolean(x),Boolean(w)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Or2BooleanBoolean(And2BooleanBoolean(y,x),And2BooleanBoolean(y,w)),And2BooleanBoolean(y,Or2BooleanBoolean(x,w)))))),CDISTRIBUTION).",["And2BooleanBoolean","Or2BooleanBoolean","C2MomentBoolean"])),
("CCOMMUTATIVITY_OF_AND",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(y,x),And2BooleanBoolean(x,y)))))),CCOMMUTATIVITY_OF_AND).",["And2BooleanBoolean","C2MomentBoolean"])),
("CCOMMUTATIVITY_OF_OR",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Or2BooleanBoolean(y,x),Or2BooleanBoolean(x,y))))),CCOMMUTATIVITY_OF_OR).",["Or2BooleanBoolean","C2MomentBoolean"])),
("CCOMMUTATIVITY_OF_XOR",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Xor2BooleanBoolean(y,x),Xor2BooleanBoolean(x,y))))),CCOMMUTATIVITY_OF_XOR).",["Xor2BooleanBoolean","C2MomentBoolean"])),
("CDEFINITION_OF_XOR",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Or2BooleanBoolean(And2BooleanBoolean(y,Not1Boolean(x)),And2BooleanBoolean(Not1Boolean(y),x)),Xor2BooleanBoolean(y,x))))),CDEFINITION_OF_XOR).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean","Xor2BooleanBoolean","C2MomentBoolean"])),
("CDEFINITION_OF_XOR1",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Xor2BooleanBoolean(y,x),Or2BooleanBoolean(And2BooleanBoolean(y,Not1Boolean(x)),And2BooleanBoolean(Not1Boolean(y),x)))))),CDEFINITION_OF_XOR).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean","Xor2BooleanBoolean","C2MomentBoolean"])),
("CDEFINITION_OF_XOR2",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(Or2BooleanBoolean(y,x),Not1Boolean(And2BooleanBoolean(y,x))),Xor2BooleanBoolean(y,x))))),CDEFINITION_OF_XOR).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean","Xor2BooleanBoolean","C2MomentBoolean"])),
("CDEFINITION_OF_XOR3",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Xor2BooleanBoolean(y,x),And2BooleanBoolean(Or2BooleanBoolean(y,x),Not1Boolean(And2BooleanBoolean(y,x))))))),CDEFINITION_OF_XOR).",["And2BooleanBoolean","Not1Boolean","Or2BooleanBoolean","Xor2BooleanBoolean","C2MomentBoolean"])),
("CDISJUNCTIVE_SYLLOGISM",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Or2BooleanBoolean(Not1Boolean(y),x),Implies2BooleanBoolean(y,x))))),CDISJUNCTIVE_SYLLOGISM).",["Not1Boolean","Or2BooleanBoolean","Implies2BooleanBoolean","C2MomentBoolean"])),
("CDISJUNCTIVE_SYLLOGISM1",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Implies2BooleanBoolean(y,x),Or2BooleanBoolean(Not1Boolean(y),x))))),CDISJUNCTIVE_SYLLOGISM).",["Not1Boolean","Or2BooleanBoolean","Implies2BooleanBoolean","C2MomentBoolean"])),
("CCUT_ELIMINATION",("formula(forall([Moment(z),Boolean(y),Boolean(x),Boolean(w)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(Implies2BooleanBoolean(y,x),Implies2BooleanBoolean(x,w)),Implies2BooleanBoolean(y,w))))),CCUT_ELIMINATION).",["And2BooleanBoolean","Implies2BooleanBoolean","C2MomentBoolean"])),
("CDISJUNCTION_ELIMINATION",("formula(forall([Moment(z),Boolean(y),Boolean(x),Boolean(w)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(And2BooleanBoolean(Or2BooleanBoolean(y,x),Implies2BooleanBoolean(y,w)),Implies2BooleanBoolean(x,w)),w)))),CDISJUNCTION_ELIMINATION).",["And2BooleanBoolean","Or2BooleanBoolean","Implies2BooleanBoolean","C2MomentBoolean"])),
("CDEFINITION_OF_IFF",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(Iff2BooleanBoolean(y,x),And2BooleanBoolean(And2BooleanBoolean(Implies2BooleanBoolean(y,x),Implies2BooleanBoolean(x,y)),And2BooleanBoolean(Implies2BooleanBoolean(Not1Boolean(y),Not1Boolean(x)),Implies2BooleanBoolean(Not1Boolean(x),Not1Boolean(y)))))))),CDEFINITION_OF_IFF).",["And2BooleanBoolean","Not1Boolean","Implies2BooleanBoolean","Iff2BooleanBoolean","C2MomentBoolean"])),
("CDEFINITION_OF_IFF1",("formula(forall([Moment(z),Boolean(y),Boolean(x)],isValid(C2MomentBoolean(z,Implies2BooleanBoolean(And2BooleanBoolean(And2BooleanBoolean(Implies2BooleanBoolean(y,x),Implies2BooleanBoolean(x,y)),And2BooleanBoolean(Implies2BooleanBoolean(Not1Boolean(y),Not1Boolean(x)),Implies2BooleanBoolean(Not1Boolean(x),Not1Boolean(y)))),Iff2BooleanBoolean(y,x))))),CDEFINITION_OF_IFF).",["And2BooleanBoolean","Not1Boolean","Implies2BooleanBoolean","Iff2BooleanBoolean","C2MomentBoolean"])),
])
symbolReverter = dict()
#This constructor creates one of these containers and runs a proof with it
#axiomContainer is a dcecContainer containing all the statements that will serve as axioms
#query is a valid string within that container or a token tree from that container
#justify indicates whether the prover should print a justification of the proof
#timeout is the number of seconds the proof should run for before quitting
#options is a string of SPASS options for the prover
    #simultaneous indicates whether to use a modified set of DCEC inference rules that all apply within a single moment (no temporal ordering)
#discover indicates whether new statements that are discovered should be printed
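    #A minimal usage sketch, assuming `axioms` is a populated DCECContainer and the
    #query string parses within it (names here are hypothetical):
    #    prover = spassContainer(axioms, "B(agent1,t1,happy(agent1))", justify=True, timeout=30)
    #    print prover.resultToString()
    #    print prover.proofToString()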
def __init__(self, axiomContainer, query, justify=False, timeout=-1, options="-Auto", simultaneous=False, discover=False, rules=["DCEC_RULE_1","DCEC_RULE_2","DCEC_RULE_3","DCEC_RULE_4","DCEC_RULE_5","DCEC_RULE_6","DCEC_RULE_7","DCEC_RULE_9","DCEC_RULE_10","DCEC_RULE_11A","DCEC_RULE_11B","DCEC_RULE_12","DCEC_RULE_13","DCEC_RULE_14","DCEC_RULE_15","CCOMMUTATIVITY_OF_AND","CCOMMUTATIVITY_OF_OR","CCOMMUTATIVITY_OF_XOR","CCONJUNCTION_INTRODUCTION","CCUT_ELIMINATION","CDEFINITION_OF_IFF","CDEFINITION_OF_XOR","CDEMORGAN","CDISJUNCTION ELIMINATION","CDISJUNCTIVE_SYLLOGISM","CDISTRIBUTION","CMODUS_PONENS","COMMUTATIVITY_OF_AND","COMMUTATIVITY_OF_OR","COMMUTATIVITY_OF_XOR","CONJUNCTION_INTRODUCTION","CSIMPLIFICATION","CUT_ELIMINATION","CWEAKENING","DEFINITION_OF_IFF","DEFINITION_OF_XOR","DEMORGAN","DISJUNCTION_ELIMINATION","DISJUNCTIVE_SYLLOGISM","DISTRIBUTION","MODUS_PONENS","SIMPLIFICATION","WEAKENING"]):
self.sorts=axiomContainer.namespace.sorts
self.addInferenceRules(simultaneous,rules,axiomContainer)
parsedStatements = self.parseStatements(axiomContainer)
parsedQuery = self.parseStatement(axiomContainer,query,"CONJECTURE")
self.conjecture = query
self.addFunctions()
self.addSorts(axiomContainer)
self.addFormulas(simultaneous,rules,parsedStatements)
self.addQuery(parsedQuery)
self.spassExecute(axiomContainer,justify, timeout, options, discover,simultaneous)
def addInferenceRules(self, simultaneous, rules,container):
necessaryFuncDefs =dict([
("Action2AgentActionType",("action",2,"Action",["Agent","ActionType"])),
("Happens2EventMoment",("happens",2,"Boolean",["Event","Moment","Boolean"])),
("Self1Agent",("self",1,"Self",["Agent"])),
("P3AgentMomentBoolean",("P",3,"Boolean",["Agent","Moment","Boolean"])),
("C2MomentBoolean",("C",2,"Boolean",["Moment","Boolean"])),
("B3AgentMomentBoolean",("B",3,"Boolean",["Agent","Moment","Boolean"])),
("K3AgentMomentBoolean",("K",3,"Boolean",["Agent","Moment","Boolean"])),
("S4AgentAgentMomentBoolean",("S",4,"Boolean",["Agent","Agent","Moment","Boolean"])),
("I3AgentMomentBoolean",("I",3,"Boolean",["Agent","Moment","Boolean"])),
("O4AgentMomentBooleanBoolean",("O",4,"Boolean",["Agent","Moment","Boolean","Boolean"])),
("Not1Boolean",("not",1,"Boolean",["Boolean"])),
("Implies2BooleanBoolean",("implies",2,"Boolean",["Boolean","Boolean"])),
("And2BooleanBoolean",("and",2,"Boolean",["Boolean","Boolean"])),
("Or2BooleanBoolean",("or",2,"Boolean",["Boolean","Boolean"])),
("Xor2BooleanBoolean",("xor",2,"Boolean",["Boolean","Boolean"])),
("Iff2BooleanBoolean",("iff",2,"Boolean",["Boolean","Boolean"])),
("Lessorequal2MomentMoment",("lessOrEqual",2,"Boolean",["Moment","Moment"])),
])
for r in rules:
if simultaneous and r in self.simultaneousRules:
for t in self.simultaneousRules[r][1]:
self.symbolReverter[t]=necessaryFuncDefs[t]
container.namespace.addCodeFunction(necessaryFuncDefs[t][0],necessaryFuncDefs[t][2],necessaryFuncDefs[t][3])
if not simultaneous and r in self.temporalRules:
for t in self.temporalRules[r][1]:
self.symbolReverter[t]=necessaryFuncDefs[t]
container.namespace.addCodeFunction(necessaryFuncDefs[t][0],necessaryFuncDefs[t][2],necessaryFuncDefs[t][3])
if r in self.basicLogicRules:
for t in self.basicLogicRules[r][1]:
self.symbolReverter[t]=necessaryFuncDefs[t]
container.namespace.addCodeFunction(necessaryFuncDefs[t][0],necessaryFuncDefs[t][2],necessaryFuncDefs[t][3])
if r in self.commonlyKnownLogicRules:
for t in self.commonlyKnownLogicRules[r][1]:
self.symbolReverter[t]=necessaryFuncDefs[t]
container.namespace.addCodeFunction(necessaryFuncDefs[t][0],necessaryFuncDefs[t][2],necessaryFuncDefs[t][3])
def addSorts(self,container):
self.input+="].\npredicates[\n(isValid,1)\n].\nsorts["
for sort in container.namespace.sorts.keys():
self.input+=sort+","
self.input=self.input[:-1]
self.input +="\n].\nend_of_list.\n\nlist_of_declarations.\n"
for sort in container.namespace.sorts:
if container.namespace.sorts[sort]!=[]:
for subsort in container.namespace.sorts[sort]:
self.input+="subsort("+sort+","+subsort+").\n"
temp = ["Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F","E", "D", "C", "B", "A"]
for key in self.symbolReverter:
if self.symbolReverter[key][1]>0:
self.input+="forall(["
for x in range(0,self.symbolReverter[key][1]):
self.input+=temp[x]+","
self.input=self.input[:-1]+"],"
self.input+=self.symbolReverter[key][2]+"("+key
if self.symbolReverter[key][1]>0:
self.input+="("
for x in range(0,self.symbolReverter[key][1]):
self.input+=temp[x]+","
self.input=self.input[:-1]+"))"
self.input+=").\n"
#This method takes in a string representing a variable or function and makes it a unique identifier
#(This way I don't have to muck about with differentiating between overloaded stuff)
#It can be converted backwards later
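    #Illustrative example (hypothetical symbol, assuming sorts Agent and Boolean exist):
    #    self.convertToTerm("knows", "Boolean", ["Agent","Boolean"]) returns "Knows2AgentBoolean"
    #    and symbolReverter["Knows2AgentBoolean"] = ("knows", 2, "Boolean", ["Agent","Boolean"])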
def convertToTerm(self, inputStr,returnType,sorts):
convertedStr = inputStr.title()+str(len(sorts))
for sort in sorts:
convertedStr+=sort
self.symbolReverter.setdefault(convertedStr,(inputStr,len(sorts),returnType,sorts))
return convertedStr
#This method takes in a DCECContainer and parses all the statements inside
#It returns a list of fully parsed statements
def parseStatements(self, axiomContainer):
parsedStatements=[]
axiomNum=1
for statement in axiomContainer.statements:
parsed=self.parseStatement(axiomContainer,statement,"AXIOM_"+str(axiomNum))
self.axioms.setdefault("AXIOM_"+str(axiomNum),(axiomContainer,statement))
parsedStatements.append(parsed)
axiomNum+=1
return parsedStatements
#This method takes in a DCECContainer, a statement, a name for this axiom, and any quantified variables
def parseStatement(self, container, statement, name, vars=[]):
if statement is None:
return ""
tmp=False
if isinstance(statement,str):
newContainer = container.tokenize(statement)
if newContainer == False:
raise ValueError("The query is invalid within that set of axioms.")
if statement==newContainer.statements[0]:
return "isValid("+self.convertToTerm(statement, newContainer.sortOf(statement),[])+"),"+name;
return self.parseStatement(newContainer,newContainer.statements[0],name,vars)
parsed = ""
if statement.funcName == "forAll" or statement.funcName == "exists":
if container.sortOf(statement.args[0]) == None:
parsed += statement.funcName.lower() + "(["+statement.args[0] + "]," + self.parseStatement(container,statement.args[1], "", vars + [statement.args[0]]) + ")"
else:
parsed += statement.funcName.lower() + "(["+container.sortOf(statement.args[0])+"("+statement.args[0] + ")]," + self.parseStatement(container,statement.args[1], "", vars + [statement.args[0]]) + ")"
elif len(statement.args) == 0:
parsed += "isValid(" + self.convertToTerm(statement.funcName,container.sortOf(statement),[]) + ")"
else:
parsed += "isValid(" + self.convertToTerm(statement.funcName, container.sortOf(statement),container.sortsOfParams(statement))+ "("
parsed += self.parseSubStatements(container,statement.args, vars) + ")"
parsed = ")".join(parsed.split(",)"))
if name != "":
parsed += ")," + name
return parsed
def parseSubStatements(self, container, substmts, vars=[]):
parsed = ""
for substmt in substmts:
if not isinstance(substmt,str):
parsed += self.convertToTerm(substmt.funcName,container.sortOf(substmt),container.sortsOfParams(substmt)) + "("
parsed += self.parseSubStatements(container,substmt.args, vars) + "),"
else:
if not substmt in vars:
parsed += self.convertToTerm(substmt,container.sortOf(substmt),[]) + ","
else:
parsed += substmt + ","
return parsed
def spassExecute(self, container, justify=False, timelimit=-1, options="-Auto", discover=False,simultaneous=False):
findProof=True
if discover or justify:
for statement in container.statements:
if statement.createSExpression().find("exists")!=-1:
print "DUE TO A QUIRK OF SPASS OUTPUT PROOFS AND GENERATED STATEMENTS CANNOT BE DERIVED FROM STATEMENTS WITH exists"
findProof=False
discover=False
break
self.errors = ""
self.output = ""
self.rulesresult = None
self.rules = []
command = [self.directory + "/SPASS-3.7/SPASS/SPASS", "-TimeLimit=" + str(timelimit), "-Stdin"]
command += options.split(" ")
if justify:
command.append("-DocProof")
self.spass = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.output, self.errors = self.spass.communicate(self.input)
self.parseProof(justify, findProof, discover, container,simultaneous)
def addFunctions(self):
self.input +="begin_problem(Test).\nlist_of_descriptions.\nname({*A DCEC Problem*}).\nauthor({*James Pane-Joyce*}).\nstatus(unsatisfiable).\ndescription({*This is a SPASS proof generated off of DCEC statements.*}).\nend_of_list.\n\nlist_of_symbols.\nfunctions[\n"
for key in self.symbolReverter:
self.input += "(" + key + "," + str(self.symbolReverter[key][1]) + "),\n"
self.input = self.input[:-2] + "\n"
def addQuery(self, query):
self.input += "\nend_of_list.\n\nlist_of_formulae(conjectures).\n"
self.input += "formula(" + query + ").\n"
self.input += "end_of_list.\n\nend_problem.\n"
def addFormulas(self, simultaneous, rules, formulas):
self.input += "end_of_list.\n\nlist_of_formulae(axioms).\nformula(forall([x],implies(isValid(x),Boolean(x))),SORTING_RULE).\n"
for r in rules:
tmp=r
ittr=1
if simultaneous:
while tmp in self.simultaneousRules:
self.input += self.simultaneousRules[tmp][0]+"\n"
tmp=r+str(ittr)
ittr+=1
if not simultaneous:
while tmp in self.temporalRules:
self.input += self.temporalRules[tmp][0]+"\n"
tmp=r+str(ittr)
ittr+=1
while tmp in self.basicLogicRules:
self.input += self.basicLogicRules[tmp][0]+"\n"
tmp=r+str(ittr)
ittr+=1
while tmp in self.commonlyKnownLogicRules:
self.input += self.commonlyKnownLogicRules[tmp][0]+"\n"
tmp=r+str(ittr)
ittr+=1
for formula in formulas:
self.input += "formula(" + formula + ").\n"
def parseProof(self, justified, findProof, discover, container,simultaneous):
index = self.output.find("SPASS beiseite: ")
resultStr = self.output[index:self.output.find("\n", index)]
if self.errors != "":
self.result = ("Error Found", [], [])
elif resultStr.find("Ran out of time.") != -1:
self.result = ("Time Up", [], [])
elif resultStr.find("Completion found.") != -1:
self.result = ("Completion Found", [], [])
elif resultStr.find("Proof found.") != -1:
self.result = ("Proof Found", [], [])
if justified:
index = self.output.find("Here is a proof with ")
proofStr = self.output[self.output.find("\n", index):self.output.find("\nFormulae used in the proof",index)]
if findProof:
emptyContainer=DCECContainer()
emptyContainer.namespace=container.namespace
self.proof=proofTree(proofStr,emptyContainer,simultaneous,self.symbolReverter)
index = self.output.find("Formulae used in the proof : ") + len("Formulae used in the proof : ")
proofStr = self.output[index:self.output.find("\n", index)]
steps = proofStr.split(" ")
for x in range(len(steps)-1,-1,-1):
if steps[x]=="CONJECTURE":
steps.pop(x)
elif steps[x]=="SORTING_RULE":
steps.pop(x)
elif steps[x].startswith("declaration"):
steps.pop(x)
steps=Set(steps)
for step in steps:
if step in self.axioms:
self.result[1].append(self.axioms[step])
else:
self.result[2].append(step)
logic = self.output[self.output.find("Given clause:"):self.output.find("SPASS V 3.7")]
logic = logic.split("\n")
self.discoveries=DCECContainer()
self.discoveries.namespace=container.namespace
if discover:
index = len(logic) - 1
while index >= 0:
if logic[index][-4:]=="-> .":
logic.pop(index)
else:
logic[index]=logic[index][logic[index].find("]")+1:]
tmperarotya= toSNotation(logic[index],container.namespace.sorts,self.symbolReverter)
if tmperarotya!="":
self.discoveries.addStatement(tmperarotya)
index-=1
def getResult(self):
return self.result
def resultToString(self):
output = self.result[0] + "\n"
for statement in self.result[1]:
output += "\t" + statement[0].printStatement(statement[1]) + "\n"
for rule in self.result[2]:
output += "\t\t" + rule + "\n"
return output
def discoveriesToString(self):
output = ""
for discovery in self.discoveries.statements:
output += self.discoveries.printStatement(discovery)+"\n"
return output
def getOutput(self):
return self.output
def getErrors(self):
return self.errors
def getAxioms(self):
return self.axioms
def getConjecture(self):
return self.conjecture
def getRules(self):
return self.rules
def proofToString(self):
output=""
if self.proof==None:
return None
for line in self.proof.proofTree.keys():
output += line+"\n"
for p in self.proof.proofTree[line]:
output += "\t"+p+"\n"
return output
def proofToSlate(self):
if self.proof==None:
return self.proof
counter=1
numLookup=dict()
emptySlate="(:DESCRIPTIONS(\n"
for statement in self.proof.proofTree.keys():
emptySlate+='\t(:X 0 :Y 0 :ID '+str(counter)+' :NAME "" :FORMULA "'
emptySlate+=statement+'"'
emptySlate+=" :JUSTIFICATION LOGIC::ASSUME)\n"
numLookup[statement]=counter
counter+=1
emptySlate=emptySlate.replace('forAll','\\\\forall')
for sort in self.sorts:
emptySlate=emptySlate.replace(" "+sort+" ",' ')
emptySlate+="\n) :STRUCTURES ("
counter=1
for statement in self.proof.proofTree.keys():
premiseInts=[]
for x in self.proof.proofTree[statement]:
premiseInts.append(str(numLookup[x]))
premiseInts="("+(" ".join(premiseInts))+")"
if premiseInts=="()":
premiseInts="NIL"
emptySlate+="(:CONCLUSION "+str(counter)+" :PREMISES "+premiseInts+")"
counter+=1
emptySlate+=") :INTERFACE (:X 268 :Y 29 :WIDTH 1920 :HEIGHT 1037 :PROOF-SYSTEM LOGIC::FIRST-ORDER-LOGIC))"
return emptySlate
| gpl-2.0 | -5,851,352,938,676,213,000 | 95.205628 | 914 | 0.695503 | false |
Aaron1992/flask | tests/test_instance_config.py | 4 | 4365 | # -*- coding: utf-8 -*-
"""
tests.test_instance
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2014 by the Flask Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import pytest
import flask
from flask._compat import PY2
def test_explicit_instance_paths(modules_tmpdir):
with pytest.raises(ValueError) as excinfo:
flask.Flask(__name__, instance_path='instance')
assert 'must be absolute' in str(excinfo.value)
app = flask.Flask(__name__, instance_path=str(modules_tmpdir))
assert app.instance_path == str(modules_tmpdir)
def test_main_module_paths(modules_tmpdir, purge_module):
app = modules_tmpdir.join('main_app.py')
app.write('import flask\n\napp = flask.Flask("__main__")')
purge_module('main_app')
from main_app import app
here = os.path.abspath(os.getcwd())
assert app.instance_path == os.path.join(here, 'instance')
def test_uninstalled_module_paths(modules_tmpdir, purge_module):
app = modules_tmpdir.join('config_module_app.py').write(
'import os\n'
'import flask\n'
'here = os.path.abspath(os.path.dirname(__file__))\n'
'app = flask.Flask(__name__)\n'
)
purge_module('config_module_app')
from config_module_app import app
assert app.instance_path == str(modules_tmpdir.join('instance'))
def test_uninstalled_package_paths(modules_tmpdir, purge_module):
app = modules_tmpdir.mkdir('config_package_app')
init = app.join('__init__.py')
init.write(
'import os\n'
'import flask\n'
'here = os.path.abspath(os.path.dirname(__file__))\n'
'app = flask.Flask(__name__)\n'
)
purge_module('config_package_app')
from config_package_app import app
assert app.instance_path == str(modules_tmpdir.join('instance'))
def test_installed_module_paths(modules_tmpdir, modules_tmpdir_prefix,
purge_module, site_packages, limit_loader):
site_packages.join('site_app.py').write(
'import flask\n'
'app = flask.Flask(__name__)\n'
)
purge_module('site_app')
from site_app import app
assert app.instance_path == \
modules_tmpdir.join('var').join('site_app-instance')
def test_installed_package_paths(limit_loader, modules_tmpdir,
modules_tmpdir_prefix, purge_module,
monkeypatch):
installed_path = modules_tmpdir.mkdir('path')
monkeypatch.syspath_prepend(installed_path)
app = installed_path.mkdir('installed_package')
init = app.join('__init__.py')
init.write('import flask\napp = flask.Flask(__name__)')
purge_module('installed_package')
from installed_package import app
assert app.instance_path == \
modules_tmpdir.join('var').join('installed_package-instance')
def test_prefix_package_paths(limit_loader, modules_tmpdir,
modules_tmpdir_prefix, purge_module,
site_packages):
app = site_packages.mkdir('site_package')
init = app.join('__init__.py')
init.write('import flask\napp = flask.Flask(__name__)')
purge_module('site_package')
import site_package
assert site_package.app.instance_path == \
modules_tmpdir.join('var').join('site_package-instance')
def test_egg_installed_paths(install_egg, modules_tmpdir,
modules_tmpdir_prefix):
modules_tmpdir.mkdir('site_egg').join('__init__.py').write(
'import flask\n\napp = flask.Flask(__name__)'
)
install_egg('site_egg')
try:
import site_egg
assert site_egg.app.instance_path == \
str(modules_tmpdir.join('var/').join('site_egg-instance'))
finally:
if 'site_egg' in sys.modules:
del sys.modules['site_egg']
@pytest.mark.skipif(not PY2, reason='This only works under Python 2.')
def test_meta_path_loader_without_is_package(request, modules_tmpdir):
app = modules_tmpdir.join('unimportable.py')
app.write('import flask\napp = flask.Flask(__name__)')
class Loader(object):
def find_module(self, name, path=None):
return self
sys.meta_path.append(Loader())
request.addfinalizer(sys.meta_path.pop)
with pytest.raises(AttributeError):
import unimportable
| bsd-3-clause | -1,997,328,578,266,624,000 | 31.574627 | 75 | 0.629095 | false |
openstack/nova | nova/tests/unit/console/test_websocketproxy.py | 2 | 29864 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for nova websocketproxy."""
import copy
import socket
import mock
from oslo_utils.fixture import uuidsentinel as uuids
import nova.conf
from nova.console.securityproxy import base
from nova.console import websocketproxy
from nova import context as nova_context
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit import fake_console_auth_token as fake_ca
from nova import utils
CONF = nova.conf.CONF
class NovaProxyRequestHandlerDBTestCase(test.TestCase):
def setUp(self):
super(NovaProxyRequestHandlerDBTestCase, self).setUp()
self.flags(console_allowed_origins=['allowed-origin-example-1.net',
'allowed-origin-example-2.net'])
with mock.patch('websockify.ProxyRequestHandler'):
self.wh = websocketproxy.NovaProxyRequestHandler()
self.wh.server = websocketproxy.NovaWebSocketProxy()
self.wh.socket = mock.MagicMock()
self.wh.msg = mock.MagicMock()
self.wh.do_proxy = mock.MagicMock()
self.wh.headers = mock.MagicMock()
def _fake_console_db(self, **updates):
console_db = copy.deepcopy(fake_ca.fake_token_dict)
console_db['token_hash'] = utils.get_sha256_str('123-456-789')
if updates:
console_db.update(updates)
return console_db
fake_header = {
'cookie': 'token="123-456-789"',
'Origin': 'https://example.net:6080',
'Host': 'example.net:6080',
}
@mock.patch('nova.objects.ConsoleAuthToken.validate')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.compute.rpcapi.ComputeAPI.validate_console_port')
def test_new_websocket_client_db(
self, mock_validate_port, mock_inst_get, mock_validate,
internal_access_path=None,
instance_not_found=False):
db_obj = self._fake_console_db(
host='node1',
port=10000,
console_type='novnc',
access_url_base='https://example.net:6080',
internal_access_path=internal_access_path,
instance_uuid=uuids.instance,
# This is set by ConsoleAuthToken.validate
token='123-456-789'
)
ctxt = nova_context.get_context()
obj = nova.objects.ConsoleAuthToken._from_db_object(
ctxt, nova.objects.ConsoleAuthToken(), db_obj)
mock_validate.return_value = obj
if instance_not_found:
mock_inst_get.side_effect = exception.InstanceNotFound(
instance_id=uuids.instance)
if internal_access_path is None:
self.wh.socket.return_value = '<socket>'
else:
tsock = mock.MagicMock()
tsock.recv.return_value = "HTTP/1.1 200 OK\r\n\r\n"
self.wh.socket.return_value = tsock
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers = self.fake_header
if instance_not_found:
self.assertRaises(exception.InvalidToken,
self.wh.new_websocket_client)
else:
with mock.patch('nova.context.get_admin_context',
return_value=ctxt):
self.wh.new_websocket_client()
mock_validate.assert_called_once_with(ctxt, '123-456-789')
mock_validate_port.assert_called_once_with(
ctxt, mock_inst_get.return_value, str(db_obj['port']),
db_obj['console_type'])
self.wh.socket.assert_called_with('node1', 10000, connect=True)
if internal_access_path is None:
self.wh.do_proxy.assert_called_with('<socket>')
else:
self.wh.do_proxy.assert_called_with(tsock)
def test_new_websocket_client_db_internal_access_path(self):
self.test_new_websocket_client_db(internal_access_path='vmid')
def test_new_websocket_client_db_instance_not_found(self):
self.test_new_websocket_client_db(instance_not_found=True)
class NovaProxyRequestHandlerTestCase(test.NoDBTestCase):
def setUp(self):
super(NovaProxyRequestHandlerTestCase, self).setUp()
self.flags(allowed_origins=['allowed-origin-example-1.net',
'allowed-origin-example-2.net'],
group='console')
self.server = websocketproxy.NovaWebSocketProxy()
with mock.patch('websockify.ProxyRequestHandler'):
self.wh = websocketproxy.NovaProxyRequestHandler()
self.wh.server = self.server
self.wh.socket = mock.MagicMock()
self.wh.msg = mock.MagicMock()
self.wh.do_proxy = mock.MagicMock()
self.wh.headers = mock.MagicMock()
fake_header = {
'cookie': 'token="123-456-789"',
'Origin': 'https://example.net:6080',
'Host': 'example.net:6080',
}
fake_header_ipv6 = {
'cookie': 'token="123-456-789"',
'Origin': 'https://[2001:db8::1]:6080',
'Host': '[2001:db8::1]:6080',
}
fake_header_bad_token = {
'cookie': 'token="XXX"',
'Origin': 'https://example.net:6080',
'Host': 'example.net:6080',
}
fake_header_bad_origin = {
'cookie': 'token="123-456-789"',
'Origin': 'https://bad-origin-example.net:6080',
'Host': 'example.net:6080',
}
fake_header_allowed_origin = {
'cookie': 'token="123-456-789"',
'Origin': 'https://allowed-origin-example-2.net:6080',
'Host': 'example.net:6080',
}
fake_header_blank_origin = {
'cookie': 'token="123-456-789"',
'Origin': '',
'Host': 'example.net:6080',
}
fake_header_no_origin = {
'cookie': 'token="123-456-789"',
'Host': 'example.net:6080',
}
fake_header_http = {
'cookie': 'token="123-456-789"',
'Origin': 'http://example.net:6080',
'Host': 'example.net:6080',
}
fake_header_malformed_cookie = {
'cookie': '?=!; token="123-456-789"',
'Origin': 'https://example.net:6080',
'Host': 'example.net:6080',
}
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client(self, validate, check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers = self.fake_header
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
# ensure that token is masked when logged
connection_info = self.wh.msg.mock_calls[0][1][1]
self.assertEqual('***', connection_info.token)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_ipv6_url(self, validate, check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url_base': 'https://[2001:db8::1]:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://[2001:db8::1]/?token=123-456-789"
self.wh.headers = self.fake_header_ipv6
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_token_invalid(self, validate):
validate.side_effect = exception.InvalidToken(token='XXX')
self.wh.path = "http://127.0.0.1/?token=XXX"
self.wh.headers = self.fake_header_bad_token
self.assertRaises(exception.InvalidToken,
self.wh.new_websocket_client)
validate.assert_called_with(mock.ANY, "XXX")
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_internal_access_path(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'internal_access_path': 'vmid',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
tsock = mock.MagicMock()
tsock.recv.return_value = "HTTP/1.1 200 OK\r\n\r\n"
self.wh.socket.return_value = tsock
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers = self.fake_header
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
tsock.send.assert_called_with(test.MatchType(bytes))
self.wh.do_proxy.assert_called_with(tsock)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_internal_access_path_err(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'internal_access_path': 'xxx',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
tsock = mock.MagicMock()
tsock.recv.return_value = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
self.wh.socket.return_value = tsock
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers = self.fake_header
self.assertRaises(exception.InvalidConnectionInfo,
self.wh.new_websocket_client)
validate.assert_called_with(mock.ANY, "123-456-789")
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_internal_access_path_rfb(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'internal_access_path': 'vmid',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
tsock = mock.MagicMock()
HTTP_RESP = "HTTP/1.1 200 OK\r\n\r\n"
RFB_MSG = "RFB 003.003\n"
# RFB negotiation message may arrive earlier.
tsock.recv.side_effect = [HTTP_RESP + RFB_MSG,
HTTP_RESP]
self.wh.socket.return_value = tsock
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.headers = self.fake_header
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
tsock.recv.assert_has_calls([mock.call(4096, socket.MSG_PEEK),
mock.call(len(HTTP_RESP))])
self.wh.do_proxy.assert_called_with(tsock)
@mock.patch('socket.getfqdn')
def test_address_string_doesnt_do_reverse_dns_lookup(self, getfqdn):
request_mock = mock.MagicMock()
request_mock.makefile().readline.side_effect = [
b'GET /vnc.html?token=123-456-789 HTTP/1.1\r\n',
b''
]
server_mock = mock.MagicMock()
client_address = ('8.8.8.8', 54321)
handler = websocketproxy.NovaProxyRequestHandler(
request_mock, client_address, server_mock)
handler.log_message('log message using client address context info')
self.assertFalse(getfqdn.called) # no reverse dns look up
self.assertEqual(handler.address_string(), '8.8.8.8') # plain address
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_bad_origin_header(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.path = "http://127.0.0.1/"
self.wh.headers = self.fake_header_bad_origin
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_allowed_origin_header(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/"
self.wh.headers = self.fake_header_allowed_origin
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_blank_origin_header(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.path = "http://127.0.0.1/"
self.wh.headers = self.fake_header_blank_origin
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_no_origin_header(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/"
self.wh.headers = self.fake_header_no_origin
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_https_origin_proto_http(
self, validate, check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url_base': 'http://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.path = "https://127.0.0.1/"
self.wh.headers = self.fake_header
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_https_origin_proto_ws(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'serial',
'access_url_base': 'ws://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.path = "https://127.0.0.1/"
self.wh.headers = self.fake_header
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_http_forwarded_proto_https(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'serial',
'access_url_base': 'wss://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
header = {
'cookie': 'token="123-456-789"',
'Origin': 'http://example.net:6080',
'Host': 'example.net:6080',
'X-Forwarded-Proto': 'https'
}
self.wh.socket.return_value = '<socket>'
self.wh.path = "https://127.0.0.1/"
self.wh.headers = header
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_new_websocket_client_novnc_bad_console_type(self, validate,
check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'bad-console-type'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.path = "http://127.0.0.1/"
self.wh.headers = self.fake_header
self.assertRaises(exception.ValidationError,
self.wh.new_websocket_client)
@mock.patch('nova.console.websocketproxy.NovaProxyRequestHandler.'
'_check_console_port')
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_malformed_cookie(self, validate, check_port):
params = {
'id': 1,
'token': '123-456-789',
'instance_uuid': uuids.instance,
'host': 'node1',
'port': '10000',
'console_type': 'novnc',
'access_url_base': 'https://example.net:6080'
}
validate.return_value = objects.ConsoleAuthToken(**params)
self.wh.socket.return_value = '<socket>'
self.wh.path = "http://127.0.0.1/"
self.wh.headers = self.fake_header_malformed_cookie
self.wh.new_websocket_client()
validate.assert_called_with(mock.ANY, "123-456-789")
self.wh.socket.assert_called_with('node1', 10000, connect=True)
self.wh.do_proxy.assert_called_with('<socket>')
def test_reject_open_redirect(self):
# This will test the behavior when an attempt is made to cause an open
# redirect. It should be rejected.
mock_req = mock.MagicMock()
mock_req.makefile().readline.side_effect = [
b'GET //example.com/%2F.. HTTP/1.1\r\n',
b''
]
# Collect the response data to verify at the end. The
# SimpleHTTPRequestHandler writes the response data by calling the
# request socket sendall() method.
self.data = b''
def fake_sendall(data):
self.data += data
mock_req.sendall.side_effect = fake_sendall
client_addr = ('8.8.8.8', 54321)
mock_server = mock.MagicMock()
# This specifies that the server will be able to handle requests other
# than only websockets.
mock_server.only_upgrade = False
# Constructing a handler will process the mock_req request passed in.
websocketproxy.NovaProxyRequestHandler(
mock_req, client_addr, mock_server)
# Verify no redirect happens and instead a 400 Bad Request is returned.
self.data = self.data.decode()
self.assertIn('Error code: 400', self.data)
self.assertIn('Message: URI must not start with //', self.data)
@mock.patch('nova.objects.ConsoleAuthToken.validate')
def test_no_compute_rpcapi_with_invalid_token(self, mock_validate):
"""Tests that we don't create a ComputeAPI object until we actually
need to use it to call the internal compute RPC API after token
validation succeeds. This way, we will not perform expensive object
creations when we receive unauthenticated (via token) messages. In the
past, it was possible for unauthenticated requests such as TCP RST or
requests with invalid tokens to be used to DOS the console proxy
service.
"""
# We will simulate a request with an invalid token and verify it
# will not trigger a ComputeAPI object creation.
mock_req = mock.MagicMock()
mock_req.makefile().readline.side_effect = [
b'GET /vnc.html?token=123-456-789 HTTP/1.1\r\n',
b''
]
client_addr = ('8.8.8.8', 54321)
mock_server = mock.MagicMock()
handler = websocketproxy.NovaProxyRequestHandler(
mock_req, client_addr, mock_server)
# Internal ComputeAPI reference should be None when the request handler
# is initially created.
self.assertIsNone(handler._compute_rpcapi)
# Set up a token validation to fail when the new_websocket_client
# is called to handle the request.
mock_validate.side_effect = exception.InvalidToken(token='123-456-789')
# We expect InvalidToken to be raised during handling.
self.assertRaises(exception.InvalidToken, handler.new_websocket_client)
# And our internal ComputeAPI reference should still be None.
self.assertIsNone(handler._compute_rpcapi)
@mock.patch('websockify.websocketproxy.select_ssl_version')
def test_ssl_min_version_is_not_set(self, mock_select_ssl):
websocketproxy.NovaWebSocketProxy()
self.assertFalse(mock_select_ssl.called)
@mock.patch('websockify.websocketproxy.select_ssl_version')
def test_ssl_min_version_not_set_by_default(self, mock_select_ssl):
websocketproxy.NovaWebSocketProxy(ssl_minimum_version='default')
self.assertFalse(mock_select_ssl.called)
@mock.patch('websockify.websocketproxy.select_ssl_version')
def test_non_default_ssl_min_version_is_set(self, mock_select_ssl):
minver = 'tlsv1_3'
websocketproxy.NovaWebSocketProxy(ssl_minimum_version=minver)
mock_select_ssl.assert_called_once_with(minver)
class NovaWebsocketSecurityProxyTestCase(test.NoDBTestCase):
def setUp(self):
super(NovaWebsocketSecurityProxyTestCase, self).setUp()
self.flags(allowed_origins=['allowed-origin-example-1.net',
'allowed-origin-example-2.net'],
group='console')
self.server = websocketproxy.NovaWebSocketProxy(
security_proxy=mock.MagicMock(
spec=base.SecurityProxy)
)
with mock.patch('websockify.ProxyRequestHandler'):
self.wh = websocketproxy.NovaProxyRequestHandler()
self.wh.server = self.server
self.wh.path = "http://127.0.0.1/?token=123-456-789"
self.wh.socket = mock.MagicMock()
self.wh.msg = mock.MagicMock()
self.wh.do_proxy = mock.MagicMock()
self.wh.headers = mock.MagicMock()
def get_header(header):
if header == 'cookie':
return 'token="123-456-789"'
elif header == 'Origin':
return 'https://example.net:6080'
elif header == 'Host':
return 'example.net:6080'
else:
return
self.wh.headers.get = get_header
@mock.patch('nova.objects.ConsoleAuthToken.validate')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.compute.rpcapi.ComputeAPI.validate_console_port')
@mock.patch('nova.console.websocketproxy.TenantSock.close')
@mock.patch('nova.console.websocketproxy.TenantSock.finish_up')
def test_proxy_connect_ok(self, mock_finish, mock_close,
mock_port_validate, mock_get,
mock_token_validate):
mock_token_validate.return_value = nova.objects.ConsoleAuthToken(
instance_uuid=uuids.instance, host='node1', port='10000',
console_type='novnc', access_url_base='https://example.net:6080')
# The token and id attributes are set by the validate() method.
mock_token_validate.return_value.token = '123-456-789'
mock_token_validate.return_value.id = 1
sock = mock.MagicMock(
spec=websocketproxy.TenantSock)
self.server.security_proxy.connect.return_value = sock
self.wh.new_websocket_client()
self.wh.do_proxy.assert_called_with(sock)
mock_finish.assert_called_with()
        self.assertEqual(mock_close.call_count, 0)
@mock.patch('nova.objects.ConsoleAuthToken.validate')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.compute.rpcapi.ComputeAPI.validate_console_port')
@mock.patch('nova.console.websocketproxy.TenantSock.close')
@mock.patch('nova.console.websocketproxy.TenantSock.finish_up')
def test_proxy_connect_err(self, mock_finish, mock_close,
mock_port_validate, mock_get,
mock_token_validate):
mock_token_validate.return_value = nova.objects.ConsoleAuthToken(
instance_uuid=uuids.instance, host='node1', port='10000',
console_type='novnc', access_url_base='https://example.net:6080')
# The token attribute is set by the validate() method.
mock_token_validate.return_value.token = '123-456-789'
mock_token_validate.return_value.id = 1
ex = exception.SecurityProxyNegotiationFailed("Wibble")
self.server.security_proxy.connect.side_effect = ex
self.assertRaises(exception.SecurityProxyNegotiationFailed,
self.wh.new_websocket_client)
        self.assertEqual(self.wh.do_proxy.call_count, 0)
mock_close.assert_called_with()
        self.assertEqual(mock_finish.call_count, 0)
| apache-2.0 | -5,748,044,044,201,525,000 | 38.450462 | 79 | 0.59108 | false |
jeffrey4l/nova | nova/openstack/common/report/models/conf.py | 25 | 2380 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides OpenStack Configuration Model
This module defines a class representing the data
model for :mod:`oslo_config` configuration options
"""
from nova.openstack.common.report.models import with_default_views as mwdv
from nova.openstack.common.report.views.text import generic as generic_text_views
class ConfigModel(mwdv.ModelWithDefaultViews):
"""A Configuration Options Model
This model holds data about a set of configuration options
from :mod:`oslo_config`. It supports both the default group
of options and named option groups.
:param conf_obj: a configuration object
:type conf_obj: :class:`oslo_config.cfg.ConfigOpts`
"""
def __init__(self, conf_obj):
kv_view = generic_text_views.KeyValueView(dict_sep=": ",
before_dict='')
super(ConfigModel, self).__init__(text_view=kv_view)
def opt_title(optname, co):
return co._opts[optname]['opt'].name
def opt_value(opt_obj, value):
if opt_obj['opt'].secret:
return '***'
else:
return value
self['default'] = dict(
(opt_title(optname, conf_obj),
opt_value(conf_obj._opts[optname], conf_obj[optname]))
for optname in conf_obj._opts
)
groups = {}
for groupname in conf_obj._groups:
group_obj = conf_obj._groups[groupname]
curr_group_opts = dict(
(opt_title(optname, group_obj),
opt_value(group_obj._opts[optname],
conf_obj[groupname][optname]))
for optname in group_obj._opts)
groups[group_obj.name] = curr_group_opts
self.update(groups)
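# --- Illustrative usage sketch (editor addition; not part of the upstream module) ---
# ConfigModel is normally instantiated by the oslo report generators with the
# process' global configuration object. A minimal, hedged example follows; it
# assumes an already-registered and populated ``cfg.CONF``, and the actual
# rendering is done by the surrounding report machinery through the attached
# KeyValueView. Note that options marked secret are masked as '***'.
#
#     from oslo_config import cfg
#
#     model = ConfigModel(cfg.CONF)   # snapshot of all default and grouped options
#     # the report generator then serializes `model` via its default text view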
| apache-2.0 | 5,943,051,504,569,638,000 | 35.060606 | 81 | 0.62479 | false |
saurabh6790/test-med-lib | webnotes/boot.py | 22 | 3990 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
bootstrap client session
"""
import webnotes
import webnotes.defaults
import webnotes.model.doc
import webnotes.widgets.page
import json
import webnotes.webutils
def get_bootinfo():
"""build and return boot info"""
bootinfo = webnotes._dict()
doclist = []
# profile
get_profile(bootinfo)
# control panel
cp = webnotes.model.doc.getsingle('Control Panel')
# system info
bootinfo['control_panel'] = webnotes._dict(cp.copy())
bootinfo['sysdefaults'] = webnotes.defaults.get_defaults()
bootinfo['server_date'] = webnotes.utils.nowdate()
bootinfo["send_print_in_body_and_attachment"] = webnotes.conn.get_value("Email Settings",
None, "send_print_in_body_and_attachment")
if webnotes.session['user'] != 'Guest':
bootinfo['user_info'] = get_fullnames()
bootinfo['sid'] = webnotes.session['sid'];
# home page
bootinfo.modules = webnotes.get_config().modules
bootinfo.hidden_modules = webnotes.conn.get_global("hidden_modules")
bootinfo.doctype_icons = dict(webnotes.conn.sql("""select name, icon from
tabDocType where ifnull(icon,'')!=''"""))
bootinfo.doctype_icons.update(dict(webnotes.conn.sql("""select name, icon from
tabPage where ifnull(icon,'')!=''""")))
add_home_page(bootinfo, doclist)
add_allowed_pages(bootinfo)
load_translations(bootinfo)
load_conf_settings(bootinfo)
# ipinfo
if webnotes.session['data'].get('ipinfo'):
bootinfo['ipinfo'] = webnotes.session['data']['ipinfo']
# add docs
bootinfo['docs'] = doclist
# plugins
try:
import startup.boot
startup.boot.boot_session(bootinfo)
except ImportError:
pass
from webnotes.model.utils import compress
bootinfo['docs'] = compress(bootinfo['docs'])
# deal with __slots__ in lang
if bootinfo.lang:
bootinfo.lang = unicode(bootinfo.lang)
bootinfo.metadata_version = webnotes.cache().get_value("metadata_version")
if not bootinfo.metadata_version:
bootinfo.metadata_version = webnotes.reset_metadata_version()
return bootinfo
def load_conf_settings(bootinfo):
from webnotes import conf
for key in ['developer_mode']:
if key in conf: bootinfo[key] = conf.get(key)
def add_allowed_pages(bootinfo):
bootinfo.page_info = dict(webnotes.conn.sql("""select distinct parent, modified from `tabPage Role`
where role in ('%s')""" % "', '".join(webnotes.get_roles())))
def load_translations(bootinfo):
webnotes.set_user_lang(webnotes.session.user)
if webnotes.lang != 'en':
from webnotes.translate import get_lang_data
from webnotes.utils import get_path
# framework
bootinfo["__messages"] = get_lang_data(get_path("lib","public", "js", "wn"), None, "js")
# doctype and module names
bootinfo["__messages"].update(get_lang_data(get_path("app","public", "js"), None, "js"))
bootinfo["lang"] = webnotes.lang
def get_fullnames():
"""map of user fullnames"""
ret = webnotes.conn.sql("""select name,
concat(ifnull(first_name, ''),
if(ifnull(last_name, '')!='', ' ', ''), ifnull(last_name, '')),
user_image, gender, email
from tabProfile where ifnull(enabled, 0)=1""", as_list=1)
d = {}
for r in ret:
if not r[2]:
r[2] = 'lib/images/ui/avatar.png'
else:
r[2] = r[2]
d[r[0]]= {'fullname': r[1], 'image': r[2], 'gender': r[3],
'email': r[4] or r[0]}
return d
def get_profile(bootinfo):
"""get profile info"""
bootinfo['profile'] = webnotes.user.load_profile()
def add_home_page(bootinfo, doclist):
"""load home page"""
if webnotes.session.user=="Guest":
return
home_page = webnotes.get_application_home_page(webnotes.session.user)
try:
page_doclist = webnotes.widgets.page.get(home_page)
except (webnotes.DoesNotExistError, webnotes.PermissionError), e:
page_doclist = webnotes.widgets.page.get('desktop')
bootinfo['home_page_html'] = page_doclist[0].content
bootinfo['home_page'] = page_doclist[0].name
doclist += page_doclist
| mit | -4,090,132,981,585,524,700 | 28.124088 | 100 | 0.696241 | false |
zentralopensource/zentral | zentral/utils/model_extras.py | 1 | 1378 | from collections import namedtuple
RelatedObjects = namedtuple('RelatedObjects',
["name", "concrete", "to_name", "to_model", "objects", "objects_count"])
def find_all_related_objects(obj):
    """Yield a RelatedObjects tuple for each relation of ``obj`` that has related rows."""
for field in obj._meta.get_fields():
if not field.is_relation:
continue
t = [field.name, field.concrete]
# concrete or not
if field.concrete:
t.extend([None, field.related_model])
else:
t.extend([field.field.name, field.field.model])
# get the related objects
if field.many_to_one:
related_obj = getattr(obj, field.name)
if related_obj is not None:
t.extend([[related_obj], 1])
else:
continue
elif field.one_to_one:
try:
t.extend([[getattr(obj, field.name)], 1])
except field.field.model.DoesNotExist:
continue
else:
# many to many or one to many
if field.concrete:
qs = getattr(obj, field.name)
else:
qs = getattr(obj, field.get_accessor_name())
objects_count = qs.count()
if not objects_count:
continue
else:
t.extend([qs.all(), objects_count])
yield RelatedObjects._make(t)
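# --- Illustrative usage sketch (editor addition; not part of the upstream module) ---
# `find_all_related_objects` walks every relation (concrete FKs/O2Os as well as
# reverse relations) of a saved Django model instance and yields a RelatedObjects
# tuple only for relations that actually have related rows. `instance` below is
# assumed to be any saved model instance from this project.
#
#     for rel in find_all_related_objects(instance):
#         print(rel.name, rel.to_model.__name__, rel.objects_count)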
| apache-2.0 | -7,573,938,446,816,355,000 | 32.609756 | 100 | 0.515239 | false |
openmaraude/APITaxi | APITaxi2/tests/test_customers.py | 1 | 2277 | from APITaxi_models2.unittest.factories import CustomerFactory
class TestEditCustomers:
def test_invalid(self, anonymous, operateur, moteur):
# Login required
resp = anonymous.client.put('/customers/xxx', json={})
assert resp.status_code == 401
# Permission denied
resp = operateur.client.put('/customers/xxx', json={})
assert resp.status_code == 403
# No data
resp = moteur.client.put('/customers/xxx', json={})
assert resp.status_code == 400
assert 'data' in resp.json['errors']
# Empty data
resp = moteur.client.put('/customers/xxx', json={'data': []})
assert resp.status_code == 400
assert 'data' in resp.json['errors']
# Invalid types
resp = moteur.client.put('/customers/xxx', json={'data': [{
'reprieve_begin': 'xxx',
'reprieve_end': 'xxx',
'ban_begin': 'xxx',
'ban_end': 'xxx'
}]})
assert resp.status_code == 400
for field in (
'reprieve_begin', 'reprieve_end',
'ban_begin', 'ban_end'
):
assert field in resp.json['errors'].get('data', {}).get('0', {})
# Data ok, but invalid client
resp = moteur.client.put('/customers/xxx', json={'data': [{}]})
assert resp.status_code == 404
assert 'url' in resp.json['errors']
def test_edit(self, moteur, QueriesTracker):
customer = CustomerFactory(added_by=moteur.user)
# Empty
with QueriesTracker() as qtrack:
resp = moteur.client.put('/customers/%s' % customer.id, json={
'data': [{
'reprieve_begin': '2001-01-01 01:01:01',
'reprieve_end': '2002-02-02 02:02:02',
'ban_begin': '2003-03-03 03:03:03',
'ban_end': '2004-04-04 04:04:04'
}]
})
# SELECT permissions, SELECT customer, UPDATE customer
assert qtrack.count == 3
assert resp.status_code == 200
assert customer.reprieve_begin.year == 2001
assert customer.reprieve_end.year == 2002
assert customer.ban_begin.year == 2003
assert customer.ban_end.year == 2004
| agpl-3.0 | -8,230,134,203,105,731,000 | 35.142857 | 76 | 0.542819 | false |
cpennington/edx-platform | openedx/core/djangoapps/schedules/management/commands/tests/upsell_base.py | 4 | 4091 | """
Base file for testing schedules with upsell
"""
import datetime
import itertools
from collections import namedtuple
import ddt
from edx_ace.message import Message
from edx_ace.utils.date import serialize
from freezegun import freeze_time
from mock import PropertyMock, patch
from lms.djangoapps.courseware.models import DynamicUpgradeDeadlineConfiguration
@ddt.ddt
@freeze_time('2017-08-01 00:00:00', tz_offset=0, tick=True)
class ScheduleUpsellTestMixin(object):
UpsellTestCase = namedtuple('UpsellTestCase', 'set_deadline, deadline_offset, expect_upsell')
def _setup_schedule_and_dates(self, set_deadline=True, deadline_offset=7):
"""
Creates and returns a schedule according to the provided upsell deadline values.
Also returns the offset and target_day as computed for messaging.
"""
current_day, offset, target_day, _ = self._get_dates()
upgrade_deadline = None
if set_deadline:
upgrade_deadline = current_day + datetime.timedelta(days=deadline_offset)
schedule = self._schedule_factory(
upgrade_deadline=upgrade_deadline
)
return schedule, offset, target_day
def _send_message_task(self, schedule, offset, target_day):
"""
Calls the task for sending a message to the given schedule and for the given
offset and target_day. Returns the message that would have been sent.
"""
sent_messages = []
with patch.object(self.task, 'async_send_task') as mock_schedule_send:
mock_schedule_send.apply_async = lambda args, *_a, **_kw: sent_messages.append(args[1])
self.task().apply(kwargs=dict(
site_id=self.site_config.site.id, target_day_str=serialize(target_day), day_offset=offset,
bin_num=self._calculate_bin_for_user(schedule.enrollment.user),
))
self.assertEqual(len(sent_messages), 1)
return Message.from_string(sent_messages[0])
def _contains_upsell(self, message):
"""
Returns whether the given message would contain upsell text.
"""
return message.context["show_upsell"]
@ddt.data(
*itertools.product(
(True, False), # enable DynamicUpgradeDeadlineConfiguration
(
UpsellTestCase(set_deadline=False, deadline_offset=None, expect_upsell=False), # no deadline
UpsellTestCase(set_deadline=True, deadline_offset=-7, expect_upsell=False), # deadline expired
UpsellTestCase(set_deadline=True, deadline_offset=7, expect_upsell=True), # deadline in future
)
)
)
@ddt.unpack
def test_upsell(self, enable_config, testcase):
# Make sure the new entry in the config model has a time
# later than the frozen time for it to be effective.
with freeze_time('2017-08-01 01:00:00'):
DynamicUpgradeDeadlineConfiguration.objects.create(enabled=enable_config)
schedule, offset, target_day = self._setup_schedule_and_dates(
set_deadline=testcase.set_deadline,
deadline_offset=testcase.deadline_offset,
)
message = self._send_message_task(schedule, offset, target_day)
found_upsell = self._contains_upsell(message)
expect_upsell = enable_config and testcase.expect_upsell
self.assertEqual(found_upsell, expect_upsell)
@ddt.data('es', 'es-es', 'es-419')
def test_upsell_translated(self, course_language):
schedule, offset, target_day = self._setup_schedule_and_dates()
with patch(
'openedx.core.djangoapps.content.course_overviews.models.CourseOverview.closest_released_language',
new_callable=PropertyMock
) as mock_course_language:
mock_course_language.return_value = course_language
message = self._send_message_task(schedule, offset, target_day)
self.assertEqual(
message.context['user_schedule_upgrade_deadline_time'],
u'8 de agosto de 2017',
)
| agpl-3.0 | 7,502,069,423,782,273,000 | 39.107843 | 115 | 0.657052 | false |
svartalf/python-opus | opus/decoder.py | 4 | 1853 | """High-level interface to a opus.api.decoder functions"""
from opus.api import decoder, ctl
class Decoder(object):
def __init__(self, fs, channels):
"""
Parameters:
fs : sampling rate
channels : number of channels
"""
self._fs = fs
self._channels = channels
self._state = decoder.create(fs, channels)
def __del__(self):
if hasattr(self, '_state'):
# Destroying state only if __init__ completed successfully
decoder.destroy(self._state)
def reset_state(self):
"""Resets the codec state to be equivalent to a freshly initialized state"""
decoder.ctl(self._state, ctl.reset_state)
def decode(self, data, frame_size, decode_fec=False):
return decoder.decode(self._state, data, len(data), frame_size, decode_fec, channels=self._channels)
def decode_float(self, data, frame_size, decode_fec=False):
return decoder.decode_float(self._state, data, len(data), frame_size, decode_fec, channels=self._channels)
# CTL interfaces
_get_final_range = lambda self: decoder.ctl(self._state, ctl.get_final_range)
final_range = property(_get_final_range)
_get_bandwidth = lambda self: decoder.ctl(self._state, ctl.get_bandwidth)
bandwidth = property(_get_bandwidth)
_get_pitch = lambda self: decoder.ctl(self._state, ctl.get_pitch)
pitch = property(_get_pitch)
_get_lsb_depth = lambda self: decoder.ctl(self._state, ctl.get_lsb_depth)
_set_lsb_depth = lambda self, x: decoder.ctl(self._state, ctl.set_lsb_depth, x)
lsb_depth = property(_get_lsb_depth, _set_lsb_depth)
_get_gain = lambda self: decoder.ctl(self._state, ctl.get_gain)
_set_gain = lambda self, x: decoder.ctl(self._state, ctl.set_gain, x)
gain = property(_get_gain, _set_gain)
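# --- Illustrative usage sketch (editor addition; not part of the upstream module) ---
# A minimal, hedged example of decoding a single Opus packet at 48 kHz stereo.
# `packet` is assumed to hold one encoded frame (e.g. produced by this package's
# Encoder or demuxed from an Ogg/Opus stream); 960 samples == 20 ms at 48 kHz.
#
#     dec = Decoder(fs=48000, channels=2)
#     pcm16 = dec.decode(packet, frame_size=960)         # 16-bit signed PCM
#     pcmf = dec.decode_float(packet, frame_size=960)    # 32-bit float PCM
#     dec.reset_state()                                   # e.g. after a seek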
| bsd-3-clause | 8,219,980,433,603,259,000 | 30.40678 | 114 | 0.642742 | false |
TrackMaven/celery-once | tests/unit/test_helpers.py | 6 | 2496 | # -*- coding: utf-8 -*-
from celery_once.helpers import queue_once_key, kwargs_to_list, force_string
import pytest
import six
def test_force_string_1():
assert force_string('a') == 'a'
def test_force_string_2():
assert force_string(u'a') == 'a'
def test_force_string_3():
assert force_string('é') == 'é'
def test_force_string_4():
assert force_string(u'é') == 'é'
def test_kwargs_to_list_empty():
keys = kwargs_to_list({})
assert keys == []
def test_kwargs_to_list_1():
keys = kwargs_to_list({'int': 1})
assert keys == ["int-1"]
def test_kwargs_to_list_2():
keys = kwargs_to_list({'int': 1, 'boolean': True})
assert keys == ["boolean-True", "int-1"]
def test_kwargs_to_list_3():
keys = kwargs_to_list({'int': 1, 'boolean': True, 'str': "abc"})
assert keys == ["boolean-True", "int-1", "str-abc"]
def test_kwargs_to_list_4():
keys = kwargs_to_list(
{'int': 1, 'boolean': True, 'str': 'abc', 'list': [1, '2']})
assert keys == ["boolean-True", "int-1", "list-[1, '2']", "str-abc"]
@pytest.mark.skipif(six.PY3, reason='requires python 2')
def test_kwargs_to_list_5():
keys = kwargs_to_list(
{'a': {u'é': 'c'}, 'b': [u'a', 'é'], u'c': 1, 'd': 'é', 'e': u'é'})
assert keys == [
"a-{'\\xc3\\xa9': 'c'}",
"b-['a', '\\xc3\\xa9']",
"c-1",
"d-\xc3\xa9",
"e-\xc3\xa9",
]
@pytest.mark.skipif(six.PY2, reason='requires python 3')
def test_kwargs_to_list_6():
keys = kwargs_to_list(
{'a': {u'é': 'c'}, 'b': [u'a', 'é'], u'c': 1, 'd': 'é', 'e': u'é'})
assert keys == ["a-{'é': 'c'}", "b-['a', 'é']", "c-1", "d-é", 'e-é']
def test_queue_once_key():
key = queue_once_key("example", {})
assert key == "qo_example"
def test_queue_once_key_kwargs():
key = queue_once_key("example", {'pk': 10})
assert key == "qo_example_pk-10"
def test_queue_once_key_kwargs_restrict_keys():
key = queue_once_key("example", {'pk': 10, 'id': 10}, restrict_to=['pk'])
assert key == "qo_example_pk-10"
@pytest.mark.skipif(six.PY3, reason='requires python 2')
def test_queue_once_key_unicode_py2():
key = queue_once_key(u"éxample", {'a': u'é', u'b': 'é'})
assert key == "qo_\xc3\xa9xample_a-\xc3\xa9_b-\xc3\xa9"
@pytest.mark.skipif(six.PY2, reason='requires python 3')
def test_queue_once_key_unicode_py3():
key = queue_once_key(u"éxample", {'a': u'é', u'b': 'é'})
assert key == "qo_éxample_a-é_b-é"
| bsd-2-clause | 2,580,298,836,659,329,000 | 25.287234 | 77 | 0.551599 | false |
GuardianRG/CuckooSploit | web/web/settings.py | 6 | 5410 | # Copyright (C) 2010-2015 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import sys
import os
# Cuckoo path.
CUCKOO_PATH = os.path.join(os.getcwd(), "..")
sys.path.append(CUCKOO_PATH)
from lib.cuckoo.common.config import Config
cfg = Config("reporting").mongodb
# Checks if mongo reporting is enabled in Cuckoo.
if not cfg.get("enabled"):
raise Exception("Mongo reporting module is not enabled in cuckoo, aborting!")
# Get connection options from reporting.conf.
MONGO_HOST = cfg.get("host", "127.0.0.1")
MONGO_PORT = cfg.get("port", 27017)
MONGO_DB = cfg.get("db", "cuckoo")
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Database settings. We don't need it.
DATABASES = {}
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# Disabling time zone support and using local time for web interface and storage.
# See: https://docs.djangoproject.com/en/1.5/ref/settings/#time-zone
USE_TZ = False
TIME_ZONE = None
# Unique secret key generator.
# Secret key will be placed in secret_key.py file.
try:
from secret_key import *
except ImportError:
    SETTINGS_DIR = os.path.abspath(os.path.dirname(__file__))
    # Using the same generation scheme as Django's startproject.
from django.utils.crypto import get_random_string
key = get_random_string(50, "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)")
# Write secret_key.py
with open(os.path.join(SETTINGS_DIR, "secret_key.py"), "w") as key_file:
key_file.write("SECRET_KEY = \"{0}\"".format(key))
# Reload key.
from secret_key import *
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(os.getcwd(), 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Cuckoo headers.
"web.headers.CuckooHeaders",
)
ROOT_URLCONF = 'web.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'web.wsgi.application'
TEMPLATE_DIRS = (
"templates",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
#'django.contrib.sites',
#'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'analysis',
'compare',
)
LOGIN_REDIRECT_URL = "/"
# Fix to avoid migration warning in django 1.7 about test runner (1_6.W001).
# In future it could be removed: https://code.djangoproject.com/ticket/23469
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Hack to import local settings.
try:
LOCAL_SETTINGS
except NameError:
try:
from local_settings import *
except ImportError:
pass
| gpl-3.0 | 3,050,867,723,040,912,400 | 29.393258 | 85 | 0.697043 | false |
nicozhang/pyspider | tests/test_webdav.py | 1 | 4204 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
# http://binux.me
# Created on 2015-06-03 21:15
import os
import six
import time
import shutil
import inspect
import unittest2 as unittest
from six import BytesIO
from pyspider import run
from pyspider.libs import utils
from tests import data_sample_handler, data_handler
class TestWebDav(unittest.TestCase):
@classmethod
def setUpClass(self):
import easywebdav
shutil.rmtree('./data/tests', ignore_errors=True)
os.makedirs('./data/tests')
ctx = run.cli.make_context('test', [
'--taskdb', 'sqlite+taskdb:///data/tests/task.db',
'--projectdb', 'sqlite+projectdb:///data/tests/projectdb.db',
'--resultdb', 'sqlite+resultdb:///data/tests/resultdb.db',
], None, obj=utils.ObjectDict(testing_mode=True))
self.ctx = run.cli.invoke(ctx)
ctx = run.webui.make_context('webui', [
'--username', 'binux',
'--password', '4321',
], self.ctx)
self.app = run.webui.invoke(ctx)
self.app_thread = utils.run_in_thread(self.app.run)
time.sleep(5)
self.webdav = easywebdav.connect('localhost', port=5000, path='dav')
self.webdav_up = easywebdav.connect('localhost', port=5000, path='dav',
username='binux', password='4321')
@classmethod
def tearDownClass(self):
for each in self.ctx.obj.instances:
each.quit()
self.app_thread.join()
time.sleep(1)
assert not utils.check_port_open(5000)
assert not utils.check_port_open(23333)
assert not utils.check_port_open(24444)
assert not utils.check_port_open(25555)
assert not utils.check_port_open(14887)
shutil.rmtree('./data/tests', ignore_errors=True)
def test_10_ls(self):
self.assertEqual(len(self.webdav.ls()), 1)
def test_20_create_error(self):
import easywebdav
with self.assertRaises(easywebdav.OperationFailed):
self.webdav.upload(inspect.getsourcefile(data_sample_handler),
'bad_file_name')
with self.assertRaises(easywebdav.OperationFailed):
self.webdav.upload(inspect.getsourcefile(data_sample_handler),
'bad.file.name')
def test_30_create_ok(self):
self.webdav.upload(inspect.getsourcefile(data_handler), 'handler.py')
self.webdav.upload(inspect.getsourcefile(data_sample_handler), 'sample_handler.py')
self.assertEqual(len(self.webdav.ls()), 3)
def test_40_get_404(self):
io = BytesIO()
import easywebdav
with self.assertRaises(easywebdav.OperationFailed):
self.webdav.download('not_exitst', io)
io.close()
def test_50_get(self):
io = BytesIO()
self.webdav.download('handler.py', io)
self.assertEqual(utils.text(inspect.getsource(data_handler)), utils.text(io.getvalue()))
io.close()
io = BytesIO()
self.webdav.download('sample_handler.py', io)
self.assertEqual(utils.text(inspect.getsource(data_sample_handler)), utils.text(io.getvalue()))
io.close()
def test_60_edit(self):
self.webdav.upload(inspect.getsourcefile(data_handler), 'sample_handler.py')
def test_70_get(self):
io = BytesIO()
self.webdav.download('sample_handler.py', io)
self.assertEqual(utils.text(inspect.getsource(data_handler)), utils.text(io.getvalue()))
io.close()
def test_80_password(self):
import requests
rv = requests.post('http://localhost:5000/update', data={
'name': 'group',
'value': 'lock',
'pk': 'sample_handler',
})
self.assertEqual(rv.status_code, 200)
import easywebdav
with self.assertRaises(easywebdav.OperationFailed):
self.webdav.upload(inspect.getsourcefile(data_sample_handler), 'sample_handler.py')
self.webdav_up.upload(inspect.getsourcefile(data_sample_handler), 'sample_handler.py')
| apache-2.0 | 8,990,342,809,478,623,000 | 34.327731 | 103 | 0.618459 | false |
Tayamarn/socorro | webapp-django/crashstats/crashstats/tests/test_utils.py | 1 | 21058 | import copy
import datetime
from cStringIO import StringIO
from unittest import TestCase
import json
from django.http import HttpResponse
from django.test.client import RequestFactory
from crashstats.crashstats import utils
class TestUtils(TestCase):
def test_daterange(self):
format = '%Y-%m-%d'
start_date = datetime.datetime.strptime('2012-01-01', format)
end_date = datetime.datetime.strptime('2012-01-05', format)
expected = [
'2012-01-01',
'2012-01-02',
'2012-01-03',
'2012-01-04'
]
for i, d in enumerate(utils.daterange(start_date, end_date, format)):
assert d == expected[i]
def test_enhance_frame(self):
vcs_mappings = {
'hg': {
'hg.m.org': ('http://hg.m.org/'
'%(repo)s/annotate/%(revision)s'
'/%(file)s#l%(line)s')
}
}
# Test with a file that uses a vcs_mapping.
# Also test function sanitizing.
actual = {
'frame': 0,
'module': 'bad.dll',
'function': 'Func(A * a,B b)',
'file': 'hg:hg.m.org/repo/name:dname/fname:rev',
'line': 576,
}
utils.enhance_frame(actual, vcs_mappings)
expected = {
'function': 'Func(A* a, B b)',
'short_signature': 'Func',
'line': 576,
'source_link': ('http://hg.m.org/repo/name/'
'annotate/rev/dname/fname#l576'),
'file': 'dname/fname',
'frame': 0,
'signature': 'Func(A* a, B b)',
'module': 'bad.dll',
}
assert actual == expected
# Now with a file that has VCS info but isn't in vcs_mappings.
actual = {
'frame': 0,
'module': 'bad.dll',
'function': 'Func',
'file': 'git:git.m.org/repo/name:dname/fname:rev',
'line': 576,
}
utils.enhance_frame(actual, vcs_mappings)
expected = {
'function': 'Func',
'short_signature': 'Func',
'line': 576,
'file': 'fname',
'frame': 0,
'signature': 'Func',
'module': 'bad.dll',
}
assert actual == expected
# Test with no VCS info at all.
actual = {
'frame': 0,
'module': 'bad.dll',
'function': 'Func',
'file': '/foo/bar/file.c',
'line': 576,
}
utils.enhance_frame(actual, vcs_mappings)
expected = {
'function': 'Func',
'short_signature': 'Func',
'line': 576,
'file': '/foo/bar/file.c',
'frame': 0,
'signature': 'Func',
'module': 'bad.dll',
}
assert actual == expected
# Test with no source info at all.
actual = {
'frame': 0,
'module': 'bad.dll',
'function': 'Func',
}
utils.enhance_frame(actual, vcs_mappings)
expected = {
'function': 'Func',
'short_signature': 'Func',
'frame': 0,
'signature': 'Func',
'module': 'bad.dll',
}
assert actual == expected
# Test with no function info.
actual = {
'frame': 0,
'module': 'bad.dll',
'module_offset': '0x123',
}
utils.enhance_frame(actual, vcs_mappings)
expected = {
'short_signature': 'bad.dll@0x123',
'frame': 0,
'signature': 'bad.dll@0x123',
'module': 'bad.dll',
'module_offset': '0x123',
}
assert actual == expected
# Test with no module info.
actual = {
'frame': 0,
'offset': '0x1234',
}
utils.enhance_frame(actual, vcs_mappings)
expected = {
'short_signature': '@0x1234',
'frame': 0,
'signature': '@0x1234',
'offset': '0x1234',
}
assert actual == expected
def test_enhance_frame_s3_generated_sources(self):
"""Test a specific case when the frame references a S3 vcs
and the file contains a really long sha string"""
original_frame = {
'file': (
's3:gecko-generated-sources:36d62ce2ec2925f4a13e44fe534b246c23b'
'4b3d5407884d3bbfc9b0d9aebe4929985935ae582704c06e994ece0d1e7652'
'8ff1edf4543e400d0aaa8f7251b15ca/ipc/ipdl/PCompositorBridgeChild.cpp:'
),
'frame': 22,
'function': (
'mozilla::layers::PCompositorBridgeChild::OnMessageReceived(IP'
'C::Message const&)'
),
'function_offset': '0xd9d',
'line': 1495,
'module': 'XUL',
'module_offset': '0x7c50bd',
'normalized': 'mozilla::layers::PCompositorBridgeChild::OnMessageReceived',
'offset': '0x108b7b0bd',
'short_signature': 'mozilla::layers::PCompositorBridgeChild::OnMessageReceived',
'signature': (
'mozilla::layers::PCompositorBridgeChild::OnMessageReceived(IP'
'C::Message const&)'
),
'trust': 'cfi'
}
# Remember, enhance_frame() mutates the dict.
frame = copy.copy(original_frame)
utils.enhance_frame(frame, {})
        # Because it can't find a mapping in 'vcs_mappings' for the frame's
        # 'file', the default behavior is to extract just the file's basename.
        assert frame['file'] == 'PCompositorBridgeChild.cpp'
# Try again, now with 's3' in vcs_mappings.
frame = copy.copy(original_frame)
utils.enhance_frame(frame, {
's3': {
'gecko-generated-sources': (
'https://example.com/%(file)s#L-%(line)s'
),
},
})
# There's a new key in the frame now. This is what's used in the
# <a href> in the HTML.
assert frame['source_link']
expected = (
'https://example.com/36d62ce2ec2925f4a13e44fe534b246c23b4b3d540788'
'4d3bbfc9b0d9aebe4929985935ae582704c06e994ece0d1e76528ff1edf4543e4'
'00d0aaa8f7251b15ca/ipc/ipdl/PCompositorBridgeChild.cpp#L-1495'
)
assert frame['source_link'] == expected
        # And that link's text is the frame's 'file' but without the 128 char
# sha.
assert frame['file'] == 'ipc/ipdl/PCompositorBridgeChild.cpp'
def test_enhance_json_dump(self):
vcs_mappings = {
'hg': {
'hg.m.org': ('http://hg.m.org/'
'%(repo)s/annotate/%(revision)s'
'/%(file)s#l%(line)s')
}
}
actual = {'threads':
[{'frames':
[
{'frame': 0,
'module': 'bad.dll',
'function': 'Func',
'file': 'hg:hg.m.org/repo/name:dname/fname:rev',
'line': 576},
{'frame': 1,
'module': 'another.dll',
'function': 'Func2',
'file': 'hg:hg.m.org/repo/name:dname/fname:rev',
'line': 576}
]},
{'frames':
[
{'frame': 0,
'module': 'bad.dll',
'function': 'Func',
'file': 'hg:hg.m.org/repo/name:dname/fname:rev',
'line': 576},
{'frame': 1,
'module': 'another.dll',
'function': 'Func2',
'file': 'hg:hg.m.org/repo/name:dname/fname:rev',
'line': 576}
]}]}
utils.enhance_json_dump(actual, vcs_mappings)
expected = {'threads':
[{'thread': 0,
'frames':
[{'frame': 0,
'function': 'Func',
'short_signature': 'Func',
'line': 576,
'source_link': ('http://hg.m.org/repo/name/'
'annotate/rev/dname/fname#l576'),
'file': 'dname/fname',
'signature': 'Func',
'module': 'bad.dll'},
{'frame': 1,
'module': 'another.dll',
'function': 'Func2',
'signature': 'Func2',
'short_signature': 'Func2',
'source_link': ('http://hg.m.org/repo/name/'
'annotate/rev/dname/fname#l576'),
'file': 'dname/fname',
'line': 576}]},
{'thread': 1,
'frames':
[{'frame': 0,
'function': 'Func',
'short_signature': 'Func',
'line': 576,
'source_link': ('http://hg.m.org/repo/name/'
'annotate/rev/dname/fname#l576'),
'file': 'dname/fname',
'signature': 'Func',
'module': 'bad.dll'},
{'frame': 1,
'module': 'another.dll',
'function': 'Func2',
'signature': 'Func2',
'short_signature': 'Func2',
'source_link': ('http://hg.m.org/repo/name/'
'annotate/rev/dname/fname#l576'),
'file': 'dname/fname',
'line': 576}]}]}
assert actual == expected
def test_parse_dump(self):
dump = (
'OS|Windows NT|6.1.7601 Service Pack 1\n'
'CPU|x86|GenuineIntel family 15 model 4 stepping 9|2\n'
'Crash|EXCEPTION_ACCESS_VIOLATION_READ|0x290|0\n'
'Module|bad.exe|1.0.0.1234|debug.pdb|debugver|saddr|eaddr|1\n'
'\n'
'0|0|bad.dll|signature|cvs:cvs.m.org/repo:fname:rev|576|0x0\n'
'0|1|bad.dll|signature|hg:hg.m.org/repo/name:fname:rev|576|0x0\n'
'1|0|ntdll.dll|KiFastSystemCallRet|||0x0\n'
'1|1|ntdll.dll|ZwClose|||0xb\n'
'1|2|ntdll.dll||||0xabc\n'
'1|3|||||0x1234\n'
)
vcs_mappings = {
'cvs': {
'cvs.m.org': ('http://bonsai.m.org/cvsblame.cgi?'
'file=%(file)s&rev=%(revision)s&'
'mark=%(line)s#%(line)s')
},
'hg': {
'hg.m.org': ('http://hg.m.org/'
'%(repo)s/annotate/%(revision)s'
'/%(file)s#l%(line)s')
}
}
actual = utils.parse_dump(dump, vcs_mappings)
expected = {
'status': 'OK',
'system_info': {
'os': 'Windows NT',
'os_ver': '6.1.7601 Service Pack 1',
'cpu_arch': 'x86',
'cpu_info': 'GenuineIntel family 15 model 4 stepping 9',
'cpu_count': 2},
'crash_info': {
'crashing_thread': 0,
'crash_address': '0x290',
'type': 'EXCEPTION_ACCESS_VIOLATION_READ'},
'main_module': 0,
'modules': [
{'debug_file': 'debug.pdb',
'version': '1.0.0.1234',
'debug_id': 'debugver',
'filename': 'bad.exe',
'base_addr': 'saddr',
'end_addr': 'eaddr'}],
'thread_count': 2,
'threads': [
{'thread': 0,
'frame_count': 2,
'frames': [
{'function': 'signature',
'short_signature': 'signature',
'line': 576,
'source_link': ('http://bonsai.m.org/'
'cvsblame.cgi?file=fname&'
'rev=rev&mark=576#576'),
'file': 'fname',
'frame': 0,
'signature': 'signature',
'module': 'bad.dll'},
{'function': 'signature',
'short_signature': 'signature',
'line': 576,
'source_link': ('http://hg.m.org/repo/name/'
'annotate/rev/fname#l576'),
'file': 'fname',
'frame': 1,
'signature': 'signature',
'module': 'bad.dll'}
]},
{'thread': 1,
'frame_count': 4,
'frames': [
{'function': 'KiFastSystemCallRet',
'short_signature': 'KiFastSystemCallRet',
'function_offset': '0x0',
'frame': 0,
'signature': 'KiFastSystemCallRet',
'module': 'ntdll.dll'},
{'function': 'ZwClose',
'short_signature': 'ZwClose',
'function_offset': '0xb',
'frame': 1,
'signature': 'ZwClose',
'module': 'ntdll.dll'},
{'signature': 'ntdll.dll@0xabc',
'short_signature': 'ntdll.dll@0xabc',
'module_offset': '0xabc',
'frame': 2,
'module': 'ntdll.dll'},
{'offset': '0x1234',
'frame': 3,
'signature': '@0x1234',
'short_signature': '@0x1234'}]}]
}
# the default line length for assert would be too short to be useful
self.maxDiff = None
assert actual == expected
def test_parse_dump_invalid_frames(self):
"""What's special about this one is that the dump is bad in that
        it starts with a 2 but there's no 0 or 1.
So what the parse_dump() function does is that it pads everything
to the left with blocks of empty frames.
See https://bugzilla.mozilla.org/show_bug.cgi?id=1071043
"""
dump = (
'OS|Windows NT|6.1.7601 Service Pack 1\n'
'CPU|x86|GenuineIntel family 15 model 4 stepping 9|2\n'
'Crash|EXCEPTION_ACCESS_VIOLATION_READ|0x290|0\n'
'Module|bad.exe|1.0.0.1234|debug.pdb|debugver|saddr|eaddr|1\n'
'\n'
'2|0|bad.dll|signature|cvs:cvs.m.org/repo:fname:rev|576|0x0\n'
)
vcs_mappings = {
'cvs': {
'cvs.m.org': ('http://bonsai.m.org/cvsblame.cgi?'
'file=%(file)s&rev=%(revision)s&'
'mark=%(line)s#%(line)s')
},
'hg': {
'hg.m.org': ('http://hg.m.org/'
'%(repo)s/annotate/%(revision)s'
'/%(file)s#l%(line)s')
}
}
actual = utils.parse_dump(dump, vcs_mappings)
expected = {
'crash_info': {
'crash_address': '0x290',
'crashing_thread': 0,
'type': 'EXCEPTION_ACCESS_VIOLATION_READ'
},
'main_module': 0,
'modules': [{
'base_addr': 'saddr',
'debug_file': 'debug.pdb',
'debug_id': 'debugver',
'end_addr': 'eaddr',
'filename': 'bad.exe',
'version': '1.0.0.1234'
}],
'status': 'OK',
'system_info': {
'cpu_arch': 'x86',
'cpu_count': 2,
'cpu_info': 'GenuineIntel family 15 model 4 stepping 9',
'os': 'Windows NT',
'os_ver': '6.1.7601 Service Pack 1'
},
'thread_count': 3,
'threads': [
{
'frame_count': 0,
'frames': [],
'thread': 0
},
{
'frame_count': 0,
'frames': [],
'thread': 1
},
{
'frame_count': 1,
'frames': [{
'file': 'fname',
'frame': 0,
'function': 'signature',
'line': 576,
'module': 'bad.dll',
'short_signature': 'signature',
'signature': 'signature',
'source_link': (
'http://bonsai.m.org/cvsblame.cgi?file=fname&'
'rev=rev&mark=576#576'
)
}],
'thread': 2
},
]
}
# the default line length for assert would be too short to be useful
self.maxDiff = None
assert actual == expected
def test_find_crash_id(self):
# A good string, no prefix
input_str = '1234abcd-ef56-7890-ab12-abcdef130802'
crash_id = utils.find_crash_id(input_str)
assert crash_id == input_str
# A good string, with prefix
input_str = 'bp-1234abcd-ef56-7890-ab12-abcdef130802'
crash_id = utils.find_crash_id(input_str)
assert crash_id == '1234abcd-ef56-7890-ab12-abcdef130802'
# A good looking string but not a real day
input_str = '1234abcd-ef56-7890-ab12-abcdef130230' # Feb 30th 2013
assert not utils.find_crash_id(input_str)
input_str = 'bp-1234abcd-ef56-7890-ab12-abcdef130230'
assert not utils.find_crash_id(input_str)
# A bad string, one character missing
input_str = 'bp-1234abcd-ef56-7890-ab12-abcdef12345'
assert not utils.find_crash_id(input_str)
# A bad string, one character not allowed
input_str = 'bp-1234abcd-ef56-7890-ab12-abcdef12345g'
assert not utils.find_crash_id(input_str)
# Close but doesn't end with 6 digits
input_str = 'f48e9617-652a-11dd-a35a-001a4bd43ed6'
assert not utils.find_crash_id(input_str)
# A random string that does not match
input_str = 'somerandomstringthatdoesnotmatch'
assert not utils.find_crash_id(input_str)
def test_unicode_writer(self):
out = StringIO()
writer = utils.UnicodeWriter(out)
writer.writerow([
'abc',
u'\xe4\xc3',
123,
1.23,
])
result = out.getvalue()
assert isinstance(result, str)
u_result = unicode(result, 'utf-8')
assert 'abc,' in u_result
assert u'\xe4\xc3,' in u_result
assert '123,' in u_result
assert '1.23' in u_result
def test_json_view_basic(self):
request = RequestFactory().get('/')
def func(request):
return {'one': 'One'}
func = utils.json_view(func)
response = func(request)
assert isinstance(response, HttpResponse)
assert json.loads(response.content) == {'one': 'One'}
assert response.status_code == 200
def test_json_view_indented(self):
request = RequestFactory().get('/?pretty=print')
def func(request):
return {'one': 'One'}
func = utils.json_view(func)
response = func(request)
assert isinstance(response, HttpResponse)
assert json.dumps({'one': 'One'}, indent=2) == response.content
assert response.status_code == 200
def test_json_view_already_httpresponse(self):
request = RequestFactory().get('/')
def func(request):
return HttpResponse('something')
func = utils.json_view(func)
response = func(request)
assert isinstance(response, HttpResponse)
assert response.content == 'something'
assert response.status_code == 200
def test_json_view_custom_status(self):
request = RequestFactory().get('/')
def func(request):
return {'one': 'One'}, 403
func = utils.json_view(func)
response = func(request)
assert isinstance(response, HttpResponse)
assert json.loads(response.content) == {'one': 'One'}
assert response.status_code == 403
| mpl-2.0 | 7,613,279,318,265,646,000 | 35.369603 | 92 | 0.440783 | false |
youfoh/webkit-efl | Tools/Scripts/webkitpy/performance_tests/perftestsrunner.py | 1 | 11902 | #!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run Inspector's perf tests in perf mode."""
import json
import logging
import optparse
import re
import sys
import time
from webkitpy.common import find_files
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.views import printing
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import ReplayPerfTest
_log = logging.getLogger(__name__)
class PerfTestsRunner(object):
_default_branch = 'webkit-trunk'
_EXIT_CODE_BAD_BUILD = -1
_EXIT_CODE_BAD_JSON = -2
_EXIT_CODE_FAILED_UPLOADING = -3
_EXIT_CODE_BAD_PREPARATION = -4
def __init__(self, args=None, port=None):
self._options, self._args = PerfTestsRunner._parse_args(args)
if port:
self._port = port
self._host = self._port.host
else:
self._host = Host()
self._port = self._host.port_factory.get(self._options.platform, self._options)
self._host._initialize_scm()
self._webkit_base_dir_len = len(self._port.webkit_base())
self._base_path = self._port.perf_tests_dir()
self._results = {}
self._timestamp = time.time()
@staticmethod
def _parse_args(args=None):
perf_option_list = [
optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
help='Set the configuration to Debug'),
optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
help='Set the configuration to Release'),
optparse.make_option("--platform",
help="Specify port/platform being tested (i.e. chromium-mac)"),
optparse.make_option("--chromium",
action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
optparse.make_option("--builder-name",
help=("The name of the builder shown on the waterfall running this script e.g. google-mac-2.")),
optparse.make_option("--build-number",
help=("The build number of the builder running this script.")),
optparse.make_option("--build", dest="build", action="store_true", default=True,
help="Check to ensure the DumpRenderTree build is up-to-date (default)."),
optparse.make_option("--no-build", dest="build", action="store_false",
help="Don't check to see if the DumpRenderTree build is up-to-date."),
optparse.make_option("--build-directory",
help="Path to the directory under which build files are kept (should not include configuration)"),
optparse.make_option("--time-out-ms", default=600 * 1000,
help="Set the timeout for each test"),
optparse.make_option("--pause-before-testing", dest="pause_before_testing", action="store_true", default=False,
help="Pause before running the tests to let user attach a performance monitor."),
optparse.make_option("--output-json-path",
help="Filename of the JSON file that summaries the results."),
optparse.make_option("--source-json-path",
help="Path to a JSON file to be merged into the JSON file when --output-json-path is present."),
optparse.make_option("--test-results-server",
help="Upload the generated JSON file to the specified server when --output-json-path is present."),
optparse.make_option("--webkit-test-runner", "-2", action="store_true",
help="Use WebKitTestRunner rather than DumpRenderTree."),
optparse.make_option("--replay", dest="replay", action="store_true", default=False,
help="Run replay tests."),
optparse.make_option("--force", dest="skipped", action="store_true", default=False,
help="Run all tests, including the ones in the Skipped list."),
]
return optparse.OptionParser(option_list=(perf_option_list)).parse_args(args)
def _collect_tests(self):
"""Return the list of tests found."""
test_extensions = ['.html', '.svg']
if self._options.replay:
test_extensions.append('.replay')
def _is_test_file(filesystem, dirname, filename):
return filesystem.splitext(filename)[1] in test_extensions
filesystem = self._host.filesystem
paths = []
for arg in self._args:
paths.append(arg)
relpath = filesystem.relpath(arg, self._base_path)
if relpath:
paths.append(relpath)
skipped_directories = set(['.svn', 'resources'])
test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
tests = []
for path in test_files:
relative_path = self._port.relative_perf_test_filename(path).replace('\\', '/')
if self._port.skips_perf_test(relative_path) and not self._options.skipped:
continue
test = PerfTestFactory.create_perf_test(self._port, relative_path, path)
tests.append(test)
return tests
def run(self):
if not self._port.check_build(needs_http=False):
_log.error("Build not up to date for %s" % self._port._path_to_driver())
return self._EXIT_CODE_BAD_BUILD
tests = self._collect_tests()
_log.info("Running %d tests" % len(tests))
for test in tests:
if not test.prepare(self._options.time_out_ms):
return self._EXIT_CODE_BAD_PREPARATION
unexpected = self._run_tests_set(sorted(list(tests), key=lambda test: test.test_name()), self._port)
options = self._options
if self._options.output_json_path:
# FIXME: Add --branch or auto-detect the branch we're in
test_results_server = options.test_results_server
branch = self._default_branch if test_results_server else None
build_number = int(options.build_number) if options.build_number else None
if not self._generate_json(self._timestamp, options.output_json_path, options.source_json_path,
branch, options.platform, options.builder_name, build_number) and not unexpected:
return self._EXIT_CODE_BAD_JSON
if test_results_server and not self._upload_json(test_results_server, options.output_json_path):
return self._EXIT_CODE_FAILED_UPLOADING
return unexpected
def _generate_json(self, timestamp, output_json_path, source_json_path, branch, platform, builder_name, build_number):
contents = {'timestamp': int(timestamp), 'results': self._results}
for (name, path) in self._port.repository_paths():
contents[name + '-revision'] = self._host.scm().svn_revision(path)
for key, value in {'branch': branch, 'platform': platform, 'builder-name': builder_name, 'build-number': build_number}.items():
if value:
contents[key] = value
filesystem = self._host.filesystem
succeeded = False
if source_json_path:
try:
source_json_file = filesystem.open_text_file_for_reading(source_json_path)
source_json = json.load(source_json_file)
contents = dict(source_json.items() + contents.items())
succeeded = True
except IOError, error:
_log.error("Failed to read %s: %s" % (source_json_path, error))
except ValueError, error:
_log.error("Failed to parse %s: %s" % (source_json_path, error))
except TypeError, error:
_log.error("Failed to merge JSON files: %s" % error)
if not succeeded:
return False
filesystem.write_text_file(output_json_path, json.dumps(contents))
return True
def _upload_json(self, test_results_server, json_path, file_uploader=FileUploader):
uploader = file_uploader("https://%s/api/test/report" % test_results_server, 120)
try:
response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
except Exception, error:
_log.error("Failed to upload JSON file in 120s: %s" % error)
return False
response_body = [line.strip('\n') for line in response]
if response_body != ['OK']:
_log.error("Uploaded JSON but got a bad response:")
for line in response_body:
_log.error(line)
return False
_log.info("JSON file uploaded.")
return True
def _print_status(self, tests, expected, unexpected):
if len(tests) == expected + unexpected:
status = "Ran %d tests" % len(tests)
else:
status = "Running %d of %d tests" % (expected + unexpected + 1, len(tests))
if unexpected:
status += " (%d didn't run)" % unexpected
_log.info(status)
def _run_tests_set(self, tests, port):
result_count = len(tests)
expected = 0
unexpected = 0
driver = None
for test in tests:
driver = port.create_driver(worker_number=1, no_timeout=True)
if self._options.pause_before_testing:
driver.start()
if not self._host.user.confirm("Ready to run test?"):
driver.stop()
return unexpected
_log.info('Running %s (%d of %d)' % (test.test_name(), expected + unexpected + 1, len(tests)))
if self._run_single_test(test, driver):
expected = expected + 1
else:
unexpected = unexpected + 1
_log.info('')
driver.stop()
return unexpected
def _run_single_test(self, test, driver):
start_time = time.time()
new_results = test.run(driver, self._options.time_out_ms)
if new_results:
self._results.update(new_results)
else:
_log.error('FAILED')
_log.debug("Finished: %f s" % (time.time() - start_time))
return new_results != None
| lgpl-2.1 | -2,935,279,222,566,545,400 | 44.083333 | 135 | 0.621492 | false |
digitalocean/netbox | netbox/ipam/migrations/0016_unicode_literals.py | 2 | 6052 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-24 15:34
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import ipam.fields
class Migration(migrations.Migration):
dependencies = [
('ipam', '0015_global_vlans'),
]
operations = [
migrations.AlterField(
model_name='aggregate',
name='family',
field=models.PositiveSmallIntegerField(choices=[(4, 'IPv4'), (6, 'IPv6')]),
),
migrations.AlterField(
model_name='aggregate',
name='rir',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='aggregates', to='ipam.RIR', verbose_name='RIR'),
),
migrations.AlterField(
model_name='ipaddress',
name='address',
field=ipam.fields.IPAddressField(help_text='IPv4 or IPv6 address (with mask)'),
),
migrations.AlterField(
model_name='ipaddress',
name='family',
field=models.PositiveSmallIntegerField(choices=[(4, 'IPv4'), (6, 'IPv6')], editable=False),
),
migrations.AlterField(
model_name='ipaddress',
name='nat_inside',
field=models.OneToOneField(blank=True, help_text='The IP for which this address is the "outside" IP', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='nat_outside', to='ipam.IPAddress', verbose_name='NAT (Inside)'),
),
migrations.AlterField(
model_name='ipaddress',
name='status',
field=models.PositiveSmallIntegerField(choices=[(1, 'Active'), (2, 'Reserved'), (3, 'Deprecated'), (5, 'DHCP')], default=1, verbose_name='Status'),
),
migrations.AlterField(
model_name='ipaddress',
name='vrf',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='ip_addresses', to='ipam.VRF', verbose_name='VRF'),
),
migrations.AlterField(
model_name='prefix',
name='family',
field=models.PositiveSmallIntegerField(choices=[(4, 'IPv4'), (6, 'IPv6')], editable=False),
),
migrations.AlterField(
model_name='prefix',
name='is_pool',
field=models.BooleanField(default=False, help_text='All IP addresses within this prefix are considered usable', verbose_name='Is a pool'),
),
migrations.AlterField(
model_name='prefix',
name='prefix',
field=ipam.fields.IPNetworkField(help_text='IPv4 or IPv6 network with mask'),
),
migrations.AlterField(
model_name='prefix',
name='role',
field=models.ForeignKey(blank=True, help_text='The primary function of this prefix', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='prefixes', to='ipam.Role'),
),
migrations.AlterField(
model_name='prefix',
name='status',
field=models.PositiveSmallIntegerField(choices=[(0, 'Container'), (1, 'Active'), (2, 'Reserved'), (3, 'Deprecated')], default=1, help_text='Operational status of this prefix', verbose_name='Status'),
),
migrations.AlterField(
model_name='prefix',
name='vlan',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='prefixes', to='ipam.VLAN', verbose_name='VLAN'),
),
migrations.AlterField(
model_name='prefix',
name='vrf',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='prefixes', to='ipam.VRF', verbose_name='VRF'),
),
migrations.AlterField(
model_name='rir',
name='is_private',
field=models.BooleanField(default=False, help_text='IP space managed by this RIR is considered private', verbose_name='Private'),
),
migrations.AlterField(
model_name='service',
name='device',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='services', to='dcim.Device', verbose_name='device'),
),
migrations.AlterField(
model_name='service',
name='ipaddresses',
field=models.ManyToManyField(blank=True, related_name='services', to='ipam.IPAddress', verbose_name='IP addresses'),
),
migrations.AlterField(
model_name='service',
name='port',
field=models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(65535)], verbose_name='Port number'),
),
migrations.AlterField(
model_name='service',
name='protocol',
field=models.PositiveSmallIntegerField(choices=[(6, 'TCP'), (17, 'UDP')]),
),
migrations.AlterField(
model_name='vlan',
name='status',
field=models.PositiveSmallIntegerField(choices=[(1, 'Active'), (2, 'Reserved'), (3, 'Deprecated')], default=1, verbose_name='Status'),
),
migrations.AlterField(
model_name='vlan',
name='vid',
field=models.PositiveSmallIntegerField(validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(4094)], verbose_name='ID'),
),
migrations.AlterField(
model_name='vrf',
name='enforce_unique',
field=models.BooleanField(default=True, help_text='Prevent duplicate prefixes/IP addresses within this VRF', verbose_name='Enforce unique space'),
),
migrations.AlterField(
model_name='vrf',
name='rd',
field=models.CharField(max_length=21, unique=True, verbose_name='Route distinguisher'),
),
]
| apache-2.0 | -8,732,237,285,541,431,000 | 45.198473 | 249 | 0.598976 | false |
jualvarez/charlex | 01-Inicial/charlexapi/charlas/migrations/0001_initial.py | 3 | 4278 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-14 02:07
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Charla',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('titulo', models.CharField(max_length=100, verbose_name='título')),
('hora', models.DateTimeField(verbose_name='hora')),
('duracion', models.DurationField(verbose_name='duración')),
('descripcion', models.TextField(null=True, verbose_name='descripción de la charla')),
],
options={
'verbose_name_plural': 'charlas',
'verbose_name': 'charla',
},
),
migrations.CreateModel(
name='FotoCharla',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('foto', models.ImageField(upload_to='fotoscharla')),
('charla', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='charlas.Charla')),
('usuario', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Lugar',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100, verbose_name='nombre del lugar')),
],
options={
'verbose_name_plural': 'lugares',
'verbose_name': 'lugar',
},
),
migrations.CreateModel(
name='Orador',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nombre', models.CharField(max_length=100, verbose_name='nombre')),
('bio', models.TextField(verbose_name='curriculum vitae')),
('foto', models.ImageField(upload_to='fotosorador', verbose_name='foto')),
],
options={
'verbose_name_plural': 'oradores',
'verbose_name': 'orador',
},
),
migrations.CreateModel(
name='UsuarioCharla',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating', models.IntegerField(null=True, verbose_name='rating')),
('charla', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='charlas.Charla')),
('usuario', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='charla',
name='asistentes',
field=models.ManyToManyField(related_name='charlas', through='charlas.UsuarioCharla', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='charla',
name='fotos',
field=models.ManyToManyField(related_name='fotos_charlas', through='charlas.FotoCharla', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='charla',
name='lugar',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='charlas.Lugar', verbose_name='lugar'),
),
migrations.AddField(
model_name='charla',
name='orador',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='charlas.Orador', verbose_name='orador'),
),
migrations.AlterUniqueTogether(
name='usuariocharla',
unique_together=set([('usuario', 'charla')]),
),
]
| gpl-3.0 | 7,809,415,429,739,944,000 | 42.181818 | 133 | 0.566082 | false |
govarguz/espressopp | src/interaction/SmoothSquareWell.py | 1 | 7137 | # Copyright (C) 2017,2018
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***************************************
espressopp.interaction.SmoothSquareWell
***************************************
This is an implementation of the smoothed square-well potential from `Leitold and Dellago JCP 141, 134901 (2014) <https://doi.org/10.1063/1.4896560>`_ :
.. math::
V(r) = \frac{\varepsilon}{2} \left\{ \exp\left[\frac{-(r-\sigma)}{a}\right] + \tanh\left[\frac{r-\lambda\sigma}{a}\right] - 1 \right\},
of which :math:`a` dictates the steepness of the slope of the square well, and :math:`{\lambda\sigma}` determines the width of the step, and :math:`{\sigma}` is the bond length of the polymer.
To reproduce the potential in the prior reference, use the code below.
.. code:: python
pot = espressopp.interaction.SmoothSquareWell(epsilon=1.0,sigma=1.0,cutoff=2.5)
pot.a = 0.002
pot.Lambda = 1.05
The SmoothSquareWell potential supports VerletListInteraction, FixedPairListInteraction and FixedPairListTypesInteraction.
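A minimal sketch of wiring the potential into a Verlet-list interaction is shown
below; it assumes an existing ``system`` (an ``espressopp.System``) and Verlet list
``vl``, neither of which is constructed in this module, and reuses ``pot`` from the
example above.
.. code:: python
    interSSW = espressopp.interaction.VerletListSmoothSquareWell(vl)
    interSSW.setPotential(type1=0, type2=0, potential=pot)
    system.addInteraction(interSSW)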
"""
from espressopp import pmi, infinity
from espressopp.esutil import *
from espressopp.interaction.Potential import *
from espressopp.interaction.Interaction import *
from _espressopp import interaction_SmoothSquareWell, interaction_VerletListSmoothSquareWell, \
interaction_FixedPairListSmoothSquareWell, interaction_FixedPairListTypesSmoothSquareWell
class SmoothSquareWellLocal(PotentialLocal, interaction_SmoothSquareWell):
def __init__(self, epsilon=1.0, sigma=0.0, cutoff=infinity, shift=0.0):
"""Initialize the local SmoothSquareWell object."""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
if shift == "auto":
cxxinit(self, interaction_SmoothSquareWell, epsilon, sigma, cutoff)
else:
cxxinit(self, interaction_SmoothSquareWell, epsilon, sigma, cutoff, shift)
class VerletListSmoothSquareWellLocal(InteractionLocal, interaction_VerletListSmoothSquareWell):
def __init__(self, vl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_VerletListSmoothSquareWell, vl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getPotential(self, type1, type2)
def getVerletListLocal(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getVerletList(self)
class FixedPairListSmoothSquareWellLocal(InteractionLocal, interaction_FixedPairListSmoothSquareWell):
def __init__(self, system, fpl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListSmoothSquareWell, system, fpl, potential)
def setPotential(self, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, potential)
def getPotential(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.getPotential(self)
def setFixedPairList(self, fpl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setFixedPairList(self, fpl)
def getFixedPairList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getFixedPairList(self)
class FixedPairListTypesSmoothSquareWellLocal(InteractionLocal, interaction_FixedPairListTypesSmoothSquareWell):
def __init__(self, system, fpl, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, interaction_FixedPairListSmoothSquareWell, system, fpl)
def setPotential(self, type1, type2, potential):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setPotential(self, type1, type2, potential)
def getPotential(self, type1, type2):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.getPotential(self, type1, type2)
def setFixedPairList(self, fpl):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.setFixedPairList(self, fpl)
def getFixedPairList(self):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
return self.cxxclass.getFixedPairList(self)
if pmi.isController:
class SmoothSquareWell(Potential):
'The SmoothSquareWell potential.'
pmiproxydefs = dict(
cls = 'espressopp.interaction.SmoothSquareWellLocal',
pmiproperty = ['epsilon', 'sigma', 'Lambda', 'a']
)
class VerletListSmoothSquareWell(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.VerletListSmoothSquareWellLocal',
pmicall = ['setPotential', 'getPotential', 'getVerletList']
)
class FixedPairListSmoothSquareWell(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedPairListSmoothSquareWellLocal',
pmicall = ['setPotential', 'getPotential', 'setFixedPairList','getFixedPairList']
)
class FixedPairListTypesSmoothSquareWell(Interaction):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
cls = 'espressopp.interaction.FixedPairListTypesSmoothSquareWellLocal',
pmicall = ['setPotential', 'getPotential', 'setFixedPairList', 'getFixedPairList']
)
| gpl-3.0 | -1,389,504,287,348,196,400 | 46.264901 | 192 | 0.697632 | false |
ttm/gmaneLegacy | tests/taggerTrain.py | 1 | 4092 | import nltk as k, pickle
tagger=k.data.load('taggers/maxent_treebank_pos_tagger/english.pickle')
# this took a very long time and returned:
# tagger.evaluate(k.corpus.brown.tagged_sents())
# 0.5952331741865255
# why aren't the tags the same?
# Brill recipe from the nltk class itself
from nltk.tbl.template import Template
from nltk.tag.brill import Pos, Word
from nltk.tag import RegexpTagger, BrillTaggerTrainer
from nltk.corpus import treebank
training_data = treebank.tagged_sents()[:100]
baseline_data = treebank.tagged_sents()[100:200]
gold_data = treebank.tagged_sents()[200:300]
#testing_data = [untag(s) for s in gold_data]
testing_data = [[ss[0] for ss in s] for s in gold_data]
backoff = RegexpTagger([
(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), # cardinal numbers
(r'(The|the|A|a|An|an)$', 'AT'), # articles
(r'.*able$', 'JJ'), # adjectives
(r'.*ness$', 'NN'), # nouns formed from adjectives
(r'.*ly$', 'RB'), # adverbs
(r'.*s$', 'NNS'), # plural nouns
(r'.*ing$', 'VBG'), # gerunds
(r'.*ed$', 'VBD'), # past tense verbs
(r'.*', 'NN') # nouns (default)
])
baseline = backoff
baseline.evaluate(gold_data)
Template._cleartemplates() #clear any templates created in earlier tests
templates = [Template(Pos([-1])), Template(Pos([-1]), Word([0]))]
tt = BrillTaggerTrainer(baseline, templates, trace=3)
tagger1 = tt.train(training_data, max_rules=10)
tagger1.rules()[1:3]
train_stats = tagger1.train_stats()
tagger1.print_template_statistics(printunused=False)
tagger1.evaluate(gold_data)
tagged, test_stats = tagger1.batch_tag_incremental(testing_data, gold_data)
tagger2 = tt.train(training_data, max_rules=10, min_acc=0.99)
print(tagger2.evaluate(gold_data)) # doctest: +ELLIPSIS
tagger2.rules()[2:4]
#nn_cd_tagger = k.tag.RegexpTagger([(r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')])
nn_cd_tagger = baseline
#tagged_data = k.corpus.treebank.tagged_sents()
tagged_data = k.corpus.treebank.tagged_sents(tagset="universal")
tagged_data2 = k.corpus.brown.tagged_sents(tagset="universal")
num_sents=len(tagged_data)
num_sents2=len(tagged_data2)
train=0.8
cutoff = int(num_sents *train)
cutoff2 = int(num_sents2*train)
training_data = tagged_data[:cutoff]+tagged_data2[:cutoff2]
gold_data = tagged_data[cutoff:]+tagged_data2[cutoff2:]
testing_data = [[t[0] for t in sent] for sent in gold_data]
print("Done loading.")
unigram_tagger = k.tag.UnigramTagger(training_data,backoff=nn_cd_tagger)
bigram_tagger = k.tag.BigramTagger(training_data,
backoff=unigram_tagger)
##templates = [
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (1,1)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (2,2)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (1,2)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (1,3)),
##
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (1,1)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (2,2)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (1,2)),
## k.tag.brill.SymmetricProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (1,3)),
##
## k.tag.brill.ProximateTokensTemplate(k.tag.brill.ProximateTagsRule, (-1, -1), (1,1)),
## k.tag.brill.ProximateTokensTemplate(k.tag.brill.ProximateWordsRule, (-1, -1), (1,1)),
## ]
trace=5
trainer = k.tag.BrillTaggerTrainer(bigram_tagger, templates, 0)
#trainer = k.tag.BrillTaggerTrainer(bigram_tagger, templates, 2)
#trainer = k.tag.brill.BrillTaggerTrainer(bigram_tagger, trace)
##trainer = brill.BrillTaggerTrainer(u, templates, trace)
max_rules=40000
min_score=2
#brill_tagger = trainer.train(training_data, max_rules, min_score)
brill_tagger = trainer.train(training_data, max_rules, 1)
f=open("./pickledir/brill_tagger5", 'wb')
pickle.dump(brill_tagger,f,-1)
f.close()
# accuracy achieved: 0.9180
| unlicense | -2,526,869,134,886,263,000 | 40.30303 | 122 | 0.691612 | false |
GHubgenius/Hammer | lib/mysql_class.py | 3 | 1634 | #!/usr/bin/python2.7
#coding:utf-8
'''
'''
import MySQLdb
class MySQLHelper:
'''A simple wrapper class around MySQLdb'''
def __init__(self,host,user,password,charset="utf8"):
self.host=host
self.user=user
self.password=password
self.charset=charset
try:
self.conn=MySQLdb.connect(host=self.host,user=self.user,passwd=self.password)
self.conn.set_character_set(self.charset)
self.cur=self.conn.cursor()
except MySQLdb.Error as e:
print("Mysql Error %d: %s" % (e.args[0], e.args[1]))
def selectDb(self,db):
try:
self.conn.select_db(db)
except MySQLdb.Error as e:
print("Mysql Error %d: %s" % (e.args[0], e.args[1]))
def query(self,sql):
try:
n=self.cur.execute(sql)
return n
except MySQLdb.Error as e:
print("Mysql Error:%s\nSQL:%s" %(e,sql))
def queryRow(self,sql):
self.query(sql)
result = self.cur.fetchone()
return result
def queryAll(self,sql):
self.query(sql)
result=self.cur.fetchall()
desc =self.cur.description
d = []
for inv in result:
_d = {}
for i in range(0,len(inv)):
_d[desc[i][0]] = str(inv[i])
d.append(_d)
return d
def insert(self,p_table_name,p_data):
for key in p_data:
p_data[key] = "'"+str(p_data[key])+"'"
key = ','.join(p_data.keys())
value = ','.join(p_data.values())
real_sql = "INSERT INTO " + p_table_name + " (" + key + ") VALUES (" + value + ")"
#self.query("set names 'utf8'")
return self.query(real_sql)
def getLastInsertId(self):
return self.cur.lastrowid
def rowcount(self):
return self.cur.rowcount
def commit(self):
self.conn.commit()
def close(self):
self.cur.close()
self.conn.close() | gpl-2.0 | -432,467,914,759,953,660 | 21.260274 | 84 | 0.638547 | false |
bpsmith/tia | tia/analysis/model/port.py | 1 | 13375 | from collections import OrderedDict
import pandas as pd
from tia.analysis.model.interface import CostCalculator, EodMarketData, PositionColumns as PC
from tia.analysis.model.pos import Positions
from tia.analysis.model.ret import RoiiRetCalculator
from tia.analysis.model.txn import Txns
from tia.analysis.util import insert_level
from tia.util.decorator import lazy_property
__all__ = ['SingleAssetPortfolio', 'PortfolioPricer', 'PortfolioSubset', 'PortfolioSummary']
class PortfolioPricer(CostCalculator, EodMarketData):
def __init__(self, multiplier=1., closing_pxs=None, dvds=None):
if not isinstance(closing_pxs, pd.Series):
raise ValueError('closing_pxs must be a Series not {0}'.format(type(closing_pxs)))
if dvds is not None and not isinstance(dvds, pd.Series):
raise ValueError('dvds be a Series not {0}'.format(type(dvds)))
self._multiplier = multiplier
self._dvds = dvds
self._closing_pxs = closing_pxs
multiplier = property(lambda self: self._multiplier)
dvds = property(lambda self: self._dvds)
def truncate(self, before=None, after=None):
return PortfolioPricer(self.multiplier, self._closing_pxs.truncate(before=before, after=after), dvds=self._dvds)
def get_closing_pxs(self, start=None, end=None):
pxs = self._closing_pxs
if start or end:
start = start or pxs.index[0]
end = end or pxs.index[-1]
pxs = pxs.ix[start:end]
return pxs
def get_mkt_val(self, pxs=None):
""" return the market value series for the specified Series of pxs """
pxs = self._closing_pxs if pxs is None else pxs
return pxs * self.multiplier
def get_premium(self, qty, px, ts=None):
return -qty * px * self.multiplier
def get_eod_frame(self):
close = self.get_closing_pxs()
mktval = self.get_mkt_val(close)
dvds = self.dvds
df = pd.DataFrame({'close': close, 'mkt_val': mktval, 'dvds': dvds})
df.index.name = 'date'
# drop dvds outside the px range
return df.truncate(before=close.index[0], after=close.index[-1])
class SingleAssetPortfolio(object):
def __init__(self, pricer, trades, ret_calc=None):
"""
:param pricer: PortfolioPricer
:param trades: list of Trade objects
"""
self.trades = tuple(trades)
self.pricer = pricer
self._ret_calc = ret_calc or RoiiRetCalculator()
txns = lazy_property(lambda self: Txns(self.trades, self.pricer, self.ret_calc), 'txns')
positions = lazy_property(lambda self: Positions(self.txns), 'positions')
pl = property(lambda self: self.txns.pl)
performance = property(lambda self: self.txns.performance)
# --------------------------------------------------
# direct access to common attributes
dly_pl = property(lambda self: self.pl.dly)
monthly_pl = property(lambda self: self.pl.monthly)
dly_rets = property(lambda self: self.performance.dly)
monthly_rets = property(lambda self: self.performance.monthly)
@property
def ret_calc(self):
return self._ret_calc
@ret_calc.setter
def ret_calc(self, calc):
self._ret_calc = calc
if hasattr(self, '_txns'):
self.txns.ret_calc = calc
def clear_cache(self):
for attr in ['_txns', '_positions', '_long', '_short']:
if hasattr(self, attr):
delattr(self, attr)
def subset(self, pids):
txns = self.txns
stxns = txns.subset(pids)
if stxns == txns: # return same object
return self
else:
# TODO: rethink logic - maybe split trades (l/s) in Portfolio constructor as now
# passing split trades back to portfolio subset
port = SingleAssetPortfolio(self.pricer, stxns.trades, ret_calc=self.ret_calc)
port._txns = stxns
if hasattr(self, '_positions'):
port._positions = self.positions.subset(stxns)
return port
@lazy_property
def long(self):
return PortfolioSubset.longs(self)
@lazy_property
def short(self):
return PortfolioSubset.shorts(self)
winner = property(lambda self: PortfolioSubset.winners(self))
loser = property(lambda self: PortfolioSubset.losers(self))
def buy_and_hold(self, qty=1., start_dt=None, end_dt=None, start_px=None, end_px=None):
"""Construct a portfolio which opens a position with size qty at start (or first data in pricer) and
continues to the specified end date. It uses the end of day market prices defined by the pricer
(or prices supplied)
:param qty: position size
:param start_dt: datetime start of the position (defaults to the first date in the pricer)
:param end_dt: datetime end of the position (defaults to the last date in the pricer)
:param start_px: opening price (defaults to the eod close at start_dt)
:param end_px: closing price (defaults to the eod close at end_dt)
:return: SingleAssetPortfolio
"""
from tia.analysis.model.trd import TradeBlotter
eod = self.pricer.get_eod_frame().close
start_dt = start_dt and pd.to_datetime(start_dt) or eod.index[0]
start_px = start_px or eod.asof(start_dt)
end_dt = end_dt and pd.to_datetime(end_dt) or eod.index[-1]
end_px = end_px or eod.asof(end_dt)
pricer = self.pricer.truncate(start_dt, end_dt)
blotter = TradeBlotter()
blotter.ts = start_dt
blotter.open(qty, start_px)
blotter.ts = end_dt
blotter.close(end_px)
trds = blotter.trades
return SingleAssetPortfolio(pricer, trds, ret_calc=self.ret_calc)
class PortfolioSubset(object):
@staticmethod
def longs(port):
return port.subset(port.positions.long_pids)
@staticmethod
def shorts(port):
return port.subset(port.positions.short_pids)
@staticmethod
def winners(port):
frame = port.positions.frame
pids = frame[frame[PC.PL] >= 0].index
return port.subset(pids)
@staticmethod
def losers(port):
frame = port.positions.frame
pids = frame[frame[PC.PL] < 0].index
return port.subset(pids)
@staticmethod
def top_pl(port, n=10):
pids = port.positions.frame[PC.PL].order()[-n:].index
return port.subset(pids)
@staticmethod
def top_rets(port, n=10):
pids = port.positions.frame[PC.RET].order()[-n:].index
return port.subset(pids)
@staticmethod
def bottom_pl(port, n=10):
pids = port.positions.frame[PC.PL].order()[:n].index
return port.subset(pids)
@staticmethod
def bottom_rets(port, n=10):
pids = port.positions.frame[PC.RET].order()[:n].index
return port.subset(pids)
@staticmethod
def top_durations(port, n=10):
pids = port.positions.frame[PC.DURATION].order()[-n:].index
return port.subset(pids)
@staticmethod
def bottom_durations(port, n=10):
pids = port.positions.frame[PC.DURATION].order()[:n].index
return port.subset(pids)
class PortfolioSummary(object):
def __init__(self):
self.total_key = 'All'
self.iter_fcts = []
def __call__(self, port, analyze_fct=None):
""" analyze_fct: fct(port) which can return Series, or map of key to Series. If key to series, then
the key is used as an additional index value.
:param port: Portfolio or dict of key->Portfolio
:param analyze_fct:
:return:
"""
iter_fcts = self.iter_fcts
lvls = len(iter_fcts)
analyze_fct = self.analyze_returns if analyze_fct is None else analyze_fct
def _iter_all_lvls(lvl, keys, parent, results):
if lvl < (lvls - 1):
# exhaust combinations
for key, child in iter_fcts[lvl](parent):
_iter_all_lvls(lvl + 1, keys + [key], child, results)
else:
# at the bottom
for key, child in iter_fcts[lvl](parent):
idx_names = ['lvl{0}'.format(i + 1) for i in range(lvls)]
idx_vals = [[k] for k in keys + [key]]
idx = pd.MultiIndex.from_arrays(idx_vals, names=idx_names)
res = analyze_fct(child)
if isinstance(res, pd.Series):
res = res.to_frame().T
res.index = idx
results.append(res)
else:
for k, v in res.iteritems():
# prepend current levels to key name
v = v.to_frame().T
idx = pd.MultiIndex.from_arrays(idx_vals + [k], names=idx_names + ['lvl%s' % lvls])
v.index = idx
results.append(v)
if lvls == 0:
def _get_res(p):
res = analyze_fct(p)
return res.to_frame().T if isinstance(res, pd.Series) else res
if hasattr(port, 'iteritems'):
pieces = []
for k, p in port.iteritems():
res = _get_res(p)
defidx = res.index.nlevels == 1 and (res.index == 0).all()
res = insert_level(res, k, axis=1, level_name='lvl1')
if defidx:
res.index = res.index.droplevel(1)
pieces.append(res)
return pd.concat(pieces)
else:
return _get_res(port)
else:
if hasattr(port, 'iteritems'):
pieces = []
for k, p in port.iteritems():
results = []
_iter_all_lvls(0, [], p, results)
tmp = pd.concat(results)
tmp.index.names = ['lvl%s' % (i + 2) for i in range(len(tmp.index.names))]
tmp = insert_level(tmp, k, level_name='lvl1', axis=1)
pieces.append(tmp)
return pd.concat(pieces)
else:
results = []
_iter_all_lvls(0, [], port, results)
return pd.concat(results)
def add_iter_fct(self, siter):
self.iter_fcts.append(siter)
return self
def include_win_loss(self, total=1):
def _split_port(port):
if total:
yield self.total_key, port
yield 'winner', PortfolioSubset.winners(port)
yield 'loser', PortfolioSubset.losers(port)
self.add_iter_fct(_split_port)
return self
def include_long_short(self, total=1):
def _split_port(port):
if total:
yield self.total_key, port
yield 'long', port.long
yield 'short', port.short
self.add_iter_fct(_split_port)
return self
@staticmethod
def analyze_returns(port):
monthly = port.performance.monthly_details
dly = port.performance.dly_details
stats = port.positions.stats
data = OrderedDict()
data[('port', 'ltd ann')] = monthly.ltd_ann
data[('port', 'mret avg')] = monthly.mean
data[('port', 'mret avg ann')] = monthly.mean_ann
data[('port', 'mret std ann')] = monthly.std_ann
data[('port', 'sharpe ann')] = monthly.sharpe_ann
data[('port', 'sortino')] = monthly.sortino
data[('port', 'maxdd')] = dly.maxdd
data[('port', 'maxdd dt')] = dly.maxdd_dt
data[('port', 'avg dd')] = dly.dd_avg
data[('port', 'nmonths')] = monthly.cnt
# pos data
data[('pos', 'cnt')] = stats.cnt
data[('pos', 'win cnt')] = stats.win_cnt
data[('pos', 'lose cnt')] = stats.lose_cnt
data[('pos', 'winpct')] = stats.win_pct
data[('pos', 'ret avg')] = stats.ret_avg
data[('pos', 'ret std')] = stats.ret_std
data[('pos', 'ret min')] = stats.ret_min
data[('pos', 'ret max')] = stats.ret_max
data[('pos', 'dur avg')] = stats.duration_avg
data[('pos', 'dur max')] = stats.duration_max
return pd.Series(data, index=pd.MultiIndex.from_tuples(data.keys()))
@staticmethod
def analyze_pl(port):
monthly = port.pl.monthly_details
dstats = port.pl.dly_details
stats = port.positions.stats
data = OrderedDict()
data[('port', 'ltd')] = monthly.ltd_frame.pl.iloc[-1]
data[('port', 'mpl avg')] = monthly.mean
data[('port', 'mpl std')] = monthly.std
data[('port', 'mpl std ann')] = monthly.std_ann
data[('port', 'mpl max')] = monthly.frame.pl.max()
data[('port', 'mpl min')] = monthly.frame.pl.min()
data[('port', 'maxdd')] = dstats.maxdd
data[('port', 'maxdd dt')] = dstats.maxdd_dt
data[('port', 'avg dd')] = dstats.dd_avg
data[('port', 'nmonths')] = monthly.cnt
# pos data
data[('pos', 'cnt')] = stats.cnt
data[('pos', 'win cnt')] = stats.win_cnt
data[('pos', 'lose cnt')] = stats.lose_cnt
data[('pos', 'winpct')] = stats.win_pct
data[('pos', 'pl avg')] = stats.pl_avg
data[('pos', 'pl std')] = stats.pl_std
data[('pos', 'pl min')] = stats.pl_min
data[('pos', 'pl max')] = stats.pl_max
return pd.Series(data, index=pd.MultiIndex.from_tuples(data.keys()))
| bsd-3-clause | -2,618,324,272,540,386,300 | 36.464986 | 120 | 0.566355 | false |
DenisCarriere/geocoder | geocoder/ottawa.py | 1 | 2767 | #!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
import logging
import re
from geocoder.base import OneResult, MultipleResultsQuery
class OttawaResult(OneResult):
@property
def lat(self):
return self.raw.get('location', {}).get('y')
@property
def lng(self):
return self.raw.get('location', {}).get('x')
@property
def postal(self):
if self.address:
expression = r'([ABCEGHJKLMNPRSTVXY]{1}\d{1}[A-Z]{1}( *\d{1}[A-Z]{1}\d{1})?)'
pattern = re.compile(expression)
match = pattern.search(self.address.upper())
if match:
return match.group(0)
@property
def housenumber(self):
if self.address:
expression = r'\d+'
pattern = re.compile(expression)
match = pattern.search(self.address)
if match:
return int(match.group(0))
@property
def city(self):
return 'Ottawa'
@property
def state(self):
return 'Ontario'
@property
def country(self):
return 'Canada'
@property
def address(self):
return self.raw.get('address')
@property
def accuracy(self):
return self.raw.get('score')
class OttawaQuery(MultipleResultsQuery):
"""
Ottawa ArcGIS REST Services
===========================
Geocoding is the process of assigning a location, usually in the form of
coordinate values (points), to an address by comparing the descriptive
location elements in the address to those present in the reference
material. Addresses come in many forms, ranging from the common address
format of a house number followed by the street name and succeeding
information to other location descriptions such as postal zone or census
tract. An address includes any type of information that distinguishes
a place.
API Reference
-------------
http://maps.ottawa.ca/ArcGIS/rest/services/compositeLocator/GeocodeServer/findAddressCandidates
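Usage
-----
A minimal sketch mirroring the ``__main__`` block at the bottom of this module;
``maxRows`` is optional and is read by ``_build_params`` below:
.. code-block:: python
    g = OttawaQuery('1552 Payette dr.', maxRows=1)
    g.debug()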
"""
provider = 'ottawa'
method = 'geocode'
_URL = 'http://maps.ottawa.ca/ArcGIS/rest/services/compositeLocator/GeocodeServer/findAddressCandidates'
_RESULT_CLASS = OttawaResult
_KEY_MANDATORY = False
def _build_params(self, location, provider_key, **kwargs):
return {
'SingleLine': location.replace(', Ottawa, ON', ''),
'f': 'json',
'outSR': 4326,
'maxLocations': kwargs.get('maxRows', 1)
}
def _adapt_results(self, json_response):
return json_response.get('candidates', [])
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
g = OttawaQuery('1552 Payette dr.')
g.debug()
| mit | 4,576,178,556,643,008,000 | 26.949495 | 108 | 0.617636 | false |
rosspalmer/Caelum | caelum/stellar_system_model/planet_generator.py | 1 | 4702 | from planet_model import Planet
import math as mt
import random as rn
M_jupiter = 2e27 # kg
R_jupiter = 71492 # km
M_earth = 6e24 # kg
R_earth = 6378 # km
def generate_planet(star_properties, planet_type, au):
parameters = {'gas_giant': {'atmosphere': {'H': 0.9, 'He': 0.1},
'mass_unit': M_jupiter,
'min_mass': 0.5, 'max_mass': 8,
'radius_unit': R_jupiter,
'min_r': 0.8, 'max_r': 1.5,
'min_a': 0.2, 'max_a': 0.4},
'ice_giant': {'atmosphere': {'H': 0.8, 'He': 0.2},
'mass_unit': M_jupiter,
'min_mass': 0.3, 'max_mass': 1.5,
'radius_unit': R_jupiter,
'min_r': 0.3, 'max_r': 0.8,
'min_a': 0.2, 'max_a': 0.4},
'desert': {'atmosphere': {},
'atm_min': 0.01, 'atm_max': 0.5,
'mass_unit': M_earth,
'min_mass': 0.5, 'max_mass': 3,
'radius_unit': R_earth,
'min_a': 0.1, 'max_a': 0.6},
'ocean': {'atmosphere': {},
'atm_min': 0.5, 'atm_max': 10,
'mass_unit': M_earth,
'min_mass': 0.5, 'max_mass': 3,
'radius_unit': R_earth,
'min_a': 0.03, 'max_a': 0.1},
'titan': {'atmosphere': {},
'atm_min': 0.5, 'atm_max': 10,
'mass_unit': M_earth,
'min_mass': 0.5, 'max_mass': 3,
'radius_unit': R_earth,
'min_a': 0.5, 'max_a': 0.8},
'terran': {'atmosphere': {},
'atm_min': 0.5, 'atm_max': 10,
'mass_unit': M_earth,
'min_mass': 0.5, 'max_mass': 3,
'radius_unit': R_earth,
'min_a': 0.3, 'max_a': 0.6},
'barren': {'mass_unit': M_earth,
'min_mass': 0.5, 'max_mass': 3,
'radius_unit': R_earth,
'min_a': 0.1, 'max_a': 0.3}
}
param = parameters[planet_type]
prop = {'planet_type': planet_type, 'au': au}
prop['mass'] = rn.uniform(round(param['min_mass'], 4),
round(param['max_mass'], 4))
prop['mass_unit'] = param['mass_unit']
if 'min_r' not in param and 'max_r' not in param:
prop['radius'] = rn.normalvariate(prop['mass'], 0.1)
else:
prop['radius'] = rn.uniform(round(param['min_r'], 4),
round(param['max_r'], 4))
prop['radius_unit'] = param['radius_unit']
volume = (4.0 / 3.0) * mt.pi * mt.pow(prop['radius'], 3)  # float division keeps the sphere volume correct under Python 2 as well
prop['density'] = prop['mass'] / volume
prop['g'] = calc_gravity(prop)
prop['semi_axis'] = prop['au'] * 1.496e11 # meters
period_sec = calc_period(prop, star_properties) # seconds
prop['period'] = period_sec / (24 * 60 * 60)
prop['rot'] = rn.random()
prop['rotation'] = calc_rotation(prop) # days
if 'atm_min' in param:
prop['atm'] = rn.uniform(round(param['atm_min'], 4),
round(param['atm_max'], 4))
prop['alb'] = rn.uniform(round(param['min_a'], 4),
round(param['max_a'], 4))
prop['temp'] = calc_temp(prop, star_properties) # K
return Planet(prop)
def calc_period(prop, star):
G = 6.674e-11
u = G * star['mass'] * star['mass_unit']
period = 2 * mt.pi * mt.sqrt((mt.pow(prop['semi_axis'], 3) / u))
return period
def calc_temp(prop, star):
A_ratio = (prop['rot'] * 0.25) + 0.25
prop_constant = 5.67e-8
num = A_ratio * star['lum'] * star['lum_unit'] * (1 - prop['alb'])
# Note: 0.96 is the emissivity, which should be modeled explicitly later
dem = 4 * mt.pi * prop_constant * 0.96 * mt.pow(prop['semi_axis'], 2)
temp = mt.pow(num / dem, 0.25)
return temp
def calc_rotation(prop):
rotation = prop['rot'] * (30 - 1) + 1 # days
return rotation
def calc_gravity(prop):
earth_ratio = 5.98e24 / mt.pow(1.276e7, 2)
planet_ratio = (prop['mass'] * prop['mass_unit']) / \
mt.pow(prop['radius'] * prop['radius_unit'] * 1000, 2)
gravity = planet_ratio / earth_ratio
return gravity | apache-2.0 | 4,304,546,259,769,949,700 | 33.07971 | 74 | 0.417695 | false |
medspx/QGIS | tests/src/python/test_qgslayoutpolygon.py | 3 | 10772 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutItemPolygon.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2016 by Paul Blottiere'
__date__ = '14/03/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.PyQt.QtGui import QPolygonF
from qgis.PyQt.QtCore import QPointF
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsLayoutItemPolygon,
QgsLayoutItemRegistry,
QgsLayout,
QgsFillSymbol,
QgsProject,
QgsReadWriteContext)
from qgis.testing import (start_app,
unittest
)
from utilities import unitTestDataPath
from qgslayoutchecker import QgsLayoutChecker
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsLayoutPolygon(unittest.TestCase):
def __init__(self, methodName):
"""Run once on class initialization."""
unittest.TestCase.__init__(self, methodName)
# create composition
self.layout = QgsLayout(QgsProject.instance())
self.layout.initializeDefaults()
# create the polygon item shared by the tests
polygon = QPolygonF()
polygon.append(QPointF(0.0, 0.0))
polygon.append(QPointF(100.0, 0.0))
polygon.append(QPointF(200.0, 100.0))
polygon.append(QPointF(100.0, 200.0))
self.polygon = QgsLayoutItemPolygon(polygon, self.layout)
self.layout.addLayoutItem(self.polygon)
# style
props = {}
props["color"] = "green"
props["style"] = "solid"
props["style_border"] = "solid"
props["color_border"] = "black"
props["width_border"] = "10.0"
props["joinstyle"] = "miter"
style = QgsFillSymbol.createSimple(props)
self.polygon.setSymbol(style)
def testNodes(self):
polygon = QPolygonF()
polygon.append(QPointF(0.0, 0.0))
polygon.append(QPointF(100.0, 0.0))
polygon.append(QPointF(200.0, 100.0))
polygon.append(QPointF(100.0, 200.0))
p = QgsLayoutItemPolygon(polygon, self.layout)
self.assertEqual(p.nodes(), polygon)
polygon = QPolygonF()
polygon.append(QPointF(0.0, 0.0))
polygon.append(QPointF(1000.0, 0.0))
polygon.append(QPointF(2000.0, 100.0))
polygon.append(QPointF(1000.0, 200.0))
p.setNodes(polygon)
self.assertEqual(p.nodes(), polygon)
def testDisplayName(self):
"""Test if displayName is valid"""
self.assertEqual(self.polygon.displayName(), "<Polygon>")
def testType(self):
"""Test if type is valid"""
self.assertEqual(
self.polygon.type(), QgsLayoutItemRegistry.LayoutPolygon)
def testDefaultStyle(self):
"""Test polygon rendering with default style."""
self.polygon.setDisplayNodes(False)
checker = QgsLayoutChecker(
'composerpolygon_defaultstyle', self.layout)
checker.setControlPathPrefix("composer_polygon")
myTestResult, myMessage = checker.testLayout()
assert myTestResult, myMessage
def testDisplayNodes(self):
"""Test displayNodes method"""
self.polygon.setDisplayNodes(True)
checker = QgsLayoutChecker(
'composerpolygon_displaynodes', self.layout)
checker.setControlPathPrefix("composer_polygon")
myTestResult, myMessage = checker.testLayout()
assert myTestResult, myMessage
self.polygon.setDisplayNodes(False)
checker = QgsLayoutChecker(
'composerpolygon_defaultstyle', self.layout)
checker.setControlPathPrefix("composer_polygon")
myTestResult, myMessage = checker.testLayout()
assert myTestResult, myMessage
def testSelectedNode(self):
"""Test selectedNode and deselectNode methods"""
self.polygon.setDisplayNodes(True)
self.polygon.setSelectedNode(3)
checker = QgsLayoutChecker(
'composerpolygon_selectednode', self.layout)
checker.setControlPathPrefix("composer_polygon")
myTestResult, myMessage = checker.testLayout()
assert myTestResult, myMessage
self.polygon.deselectNode()
self.polygon.setDisplayNodes(False)
checker = QgsLayoutChecker(
'composerpolygon_defaultstyle', self.layout)
checker.setControlPathPrefix("composer_polygon")
myTestResult, myMessage = checker.testLayout()
assert myTestResult, myMessage
def testRemoveNode(self):
"""Test removeNode method"""
rc = self.polygon.removeNode(100)
self.assertEqual(rc, False)
checker = QgsLayoutChecker(
'composerpolygon_defaultstyle', self.layout)
checker.setControlPathPrefix("composer_polygon")
myTestResult, myMessage = checker.testLayout()
assert myTestResult, myMessage
self.assertEqual(self.polygon.nodesSize(), 4)
def testAddNode(self):
"""Test addNode method"""
# default searching radius is 10
self.assertEqual(self.polygon.nodesSize(), 4)
rc = self.polygon.addNode(QPointF(50.0, 10.0))
self.assertEqual(rc, False)
# default searching radius is 10
self.assertEqual(self.polygon.nodesSize(), 4)
rc = self.polygon.addNode(QPointF(50.0, 9.99))
self.assertEqual(rc, True)
self.assertEqual(self.polygon.nodesSize(), 5)
def testAddNodeCustomRadius(self):
"""Test addNode with custom radius"""
# default searching radius is 10
self.assertEqual(self.polygon.nodesSize(), 4)
rc = self.polygon.addNode(QPointF(50.0, 8.1), True, 8.0)
self.assertEqual(rc, False)
self.assertEqual(self.polygon.nodesSize(), 4)
# default searching radius is 10
rc = self.polygon.addNode(QPointF(50.0, 7.9), True, 8.0)
self.assertEqual(rc, True)
self.assertEqual(self.polygon.nodesSize(), 5)
def testAddNodeWithoutCheckingArea(self):
"""Test addNode without checking the maximum distance allowed"""
# default searching radius is 10
self.assertEqual(self.polygon.nodesSize(), 4)
rc = self.polygon.addNode(QPointF(50.0, 20.0))
self.assertEqual(rc, False)
self.assertEqual(self.polygon.nodesSize(), 4)
# default searching radius is 10
self.assertEqual(self.polygon.nodesSize(), 4)
rc = self.polygon.addNode(QPointF(50.0, 20.0), False)
self.assertEqual(rc, True)
self.assertEqual(self.polygon.nodesSize(), 5)
checker = QgsLayoutChecker(
'composerpolygon_addnode', self.layout)
checker.setControlPathPrefix("composer_polygon")
myTestResult, myMessage = checker.testLayout()
assert myTestResult, myMessage
def testMoveNode(self):
"""Test moveNode method"""
rc = self.polygon.moveNode(30, QPointF(100.0, 300.0))
self.assertEqual(rc, False)
rc = self.polygon.moveNode(3, QPointF(100.0, 150.0))
self.assertEqual(rc, True)
checker = QgsLayoutChecker(
'composerpolygon_movenode', self.layout)
checker.setControlPathPrefix("composer_polygon")
myTestResult, myMessage = checker.testLayout()
assert myTestResult, myMessage
def testNodeAtPosition(self):
"""Test nodeAtPosition method"""
p = QPolygonF()
p.append(QPointF(0.0, 0.0))
p.append(QPointF(100.0, 0.0))
p.append(QPointF(200.0, 100.0))
p.append(QPointF(100.0, 200.0))
polygon = QgsLayoutItemPolygon(p, self.layout)
# default searching radius is 10
rc = polygon.nodeAtPosition(QPointF(100.0, 210.0))
self.assertEqual(rc, -1)
# default searching radius is 10
rc = polygon.nodeAtPosition(
QPointF(100.0, 210.0), False)
self.assertEqual(rc, 3)
# default searching radius is 10
rc = polygon.nodeAtPosition(
QPointF(100.0, 210.0), True, 10.1)
self.assertEqual(rc, 3)
def testReadWriteXml(self):
pr = QgsProject()
l = QgsLayout(pr)
p = QPolygonF()
p.append(QPointF(0.0, 0.0))
p.append(QPointF(100.0, 0.0))
p.append(QPointF(200.0, 100.0))
shape = QgsLayoutItemPolygon(p, l)
props = {}
props["color"] = "green"
props["style"] = "solid"
props["style_border"] = "solid"
props["color_border"] = "red"
props["width_border"] = "10.0"
props["joinstyle"] = "miter"
style = QgsFillSymbol.createSimple(props)
shape.setSymbol(style)
#save original item to xml
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
self.assertTrue(shape.writeXml(elem, doc, QgsReadWriteContext()))
shape2 = QgsLayoutItemPolygon(l)
self.assertTrue(shape2.readXml(elem.firstChildElement(), doc, QgsReadWriteContext()))
self.assertEqual(shape2.nodes(), shape.nodes())
self.assertEqual(shape2.symbol().symbolLayer(0).color().name(), '#008000')
self.assertEqual(shape2.symbol().symbolLayer(0).strokeColor().name(), '#ff0000')
def testBounds(self):
pr = QgsProject()
l = QgsLayout(pr)
p = QPolygonF()
p.append(QPointF(50.0, 30.0))
p.append(QPointF(100.0, 10.0))
p.append(QPointF(200.0, 100.0))
shape = QgsLayoutItemPolygon(p, l)
props = {}
props["color"] = "green"
props["style"] = "solid"
props["style_border"] = "solid"
props["color_border"] = "red"
props["width_border"] = "6.0"
props["joinstyle"] = "miter"
style = QgsFillSymbol.createSimple(props)
shape.setSymbol(style)
# scene bounding rect should include symbol outline
bounds = shape.sceneBoundingRect()
self.assertEqual(bounds.left(), 47.0)
self.assertEqual(bounds.right(), 203.0)
self.assertEqual(bounds.top(), 7.0)
self.assertEqual(bounds.bottom(), 103.0)
# rectWithFrame should include symbol outline too
bounds = shape.rectWithFrame()
self.assertEqual(bounds.left(), -3.0)
self.assertEqual(bounds.right(), 153.0)
self.assertEqual(bounds.top(), -3.0)
self.assertEqual(bounds.bottom(), 93.0)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -3,450,841,726,710,574,600 | 32.768025 | 93 | 0.624768 | false |
ecoal95/servo | tests/wpt/web-platform-tests/network-error-logging/support/lock.py | 19 | 1713 | # This file implements a shared lock that lets us ensure that the test cases in
# this directory run serially. Each test case obtains this lock as its first
# step, and releases it as its last. (The nel_test helper function in
# nel.sub.js automates this process.) Because the lock needs to be shared
# across all of the test cases, we use a hard-coded stash key. This hard-coded
# key is a random UUID, which should not conflict with any other auto-generated
# stash keys.
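# A minimal sketch (not part of the original suite; the helper name below is an
# assumption) of how a test could drive this handler over HTTP, mirroring the
# op/reportID parameters parsed in main() below:
#
#   def run_serialised(fetch_url, report_id):
#       # First grab the shared lock; a 503 means another test still holds it.
#       fetch_url("lock.py?op=lock&reportID=%s" % report_id)
#       try:
#           pass  # run the actual NEL test case here
#       finally:
#           # Always release the lock so later tests don't wait out _TIMEOUT.
#           fetch_url("lock.py?op=unlock&reportID=%s" % report_id)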
import time
_LOCK_KEY = "67966d2e-a847-41d8-b7c3-5f6aee3375ba"
_TIMEOUT = 5 # seconds
def wait_for_lock(request):
t0 = time.time()
while time.time() - t0 < _TIMEOUT:
time.sleep(0.5)
value = request.server.stash.take(key=_LOCK_KEY)
if value is None:
return True
return False
def lock(request, report_id):
with request.server.stash.lock:
# Loop until the lock is free
if not wait_for_lock(request):
return (503, [], "Cannot obtain lock")
request.server.stash.put(key=_LOCK_KEY, value=report_id)
return "Obtained lock for %s" % report_id
def unlock(request, report_id):
with request.server.stash.lock:
lock_holder = request.server.stash.take(key=_LOCK_KEY)
if lock_holder != report_id:
# Return the lock holder to the stash
request.server.stash.put(key=_LOCK_KEY, value=lock_holder)
return (503, [], "Cannot release lock held by %s" % lock_holder)
return "Released lock for %s" % report_id
def main(request, response):
op = request.GET.first("op")
report_id = request.GET.first("reportID")
if op == "lock":
return lock(request, report_id)
elif op == "unlock":
return unlock(request, report_id)
else:
return (400, [], "Invalid op")
| mpl-2.0 | -4,616,994,438,030,963,000 | 34.6875 | 79 | 0.689434 | false |
twschiller/open-synthesis | openach/migrations/0030_auto_20161008_1249.py | 1 | 1238 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-08 12:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("openach", "0029_auto_20161004_0323"),
]
operations = [
migrations.AddField(
model_name="evidencesource",
name="source_url_description",
field=models.CharField(
default="", max_length=1000, verbose_name="source url description"
),
),
migrations.AddField(
model_name="evidencesource",
name="source_url_title",
field=models.CharField(
default="", max_length=255, verbose_name="source url title"
),
),
migrations.AlterField(
model_name="usersettings",
name="digest_frequency",
field=models.PositiveSmallIntegerField(
choices=[(0, "Never"), (1, "Daily"), (2, "Weekly")],
default=1,
help_text="How frequently to receive email updates containing missed notifications",
verbose_name="email digest frequency",
),
),
]
| gpl-3.0 | -3,468,381,997,329,084,400 | 30.74359 | 100 | 0.551696 | false |
mitmproxy/mitmproxy | mitmproxy/contentviews/xml_html.py | 2 | 7375 | import io
import re
import textwrap
from typing import Iterable, Optional
from mitmproxy.contentviews import base
from mitmproxy.utils import sliding_window, strutils
"""
A custom XML/HTML prettifier. Compared to other prettifiers, its main features are:
- Implemented in pure Python.
- Modifies whitespace only.
- Works with any input.
- Lazy evaluation.
The implementation is split into two main parts: tokenization and formatting of tokens.
"""
# http://www.xml.com/pub/a/2001/07/25/namingparts.html - this is close enough for what we do.
REGEX_TAG = re.compile(r"[a-zA-Z0-9._:\-]+(?!=)")
# https://www.w3.org/TR/html5/syntax.html#void-elements
HTML_VOID_ELEMENTS = {
"area", "base", "br", "col", "embed", "hr", "img", "input", "keygen", "link", "meta", "param",
"source", "track", "wbr"
}
NO_INDENT_TAGS = {"xml", "doctype", "html"}
INDENT = 2
class Token:
def __init__(self, data):
self.data = data
def __repr__(self):
return "{}({})".format(
type(self).__name__,
self.data
)
class Text(Token):
@property
def text(self):
return self.data.strip()
class Tag(Token):
@property
def tag(self):
t = REGEX_TAG.search(self.data)
if t is not None:
return t.group(0).lower()
return "<empty>"
@property
def is_comment(self) -> bool:
return self.data.startswith("<!--")
@property
def is_cdata(self) -> bool:
return self.data.startswith("<![CDATA[")
@property
def is_closing(self):
return self.data.startswith("</")
@property
def is_self_closing(self):
return self.is_comment or self.is_cdata or self.data.endswith(
"/>") or self.tag in HTML_VOID_ELEMENTS
@property
def is_opening(self):
return not self.is_closing and not self.is_self_closing
@property
def done(self):
if self.is_comment:
return self.data.endswith("-->")
elif self.is_cdata:
return self.data.endswith("]]>")
else:
# This fails for attributes that contain an unescaped ">"
return self.data.endswith(">")
def tokenize(data: str) -> Iterable[Token]:
token: Token = Text("")
i = 0
def readuntil(char, start, include=1):
nonlocal i
end = data.find(char, start)
if end == -1:
end = len(data)
ret = data[i:end + include]
i = end + include
return ret
while i < len(data):
if isinstance(token, Text):
token.data = readuntil("<", i, 0)
if token.text:
yield token
token = Tag("")
elif isinstance(token, Tag):
token.data += readuntil(">", i, 1)
if token.done:
yield token
token = Text("")
if token.data.strip():
yield token
def indent_text(data: str, prefix: str) -> str:
# Add spacing to first line so that we dedent in cases like this:
# <li>This is
# example text
# over multiple lines
# </li>
dedented = textwrap.dedent(" " * 32 + data).strip()
return textwrap.indent(dedented, prefix[:32])
def is_inline_text(a: Optional[Token], b: Optional[Token], c: Optional[Token]) -> bool:
if isinstance(a, Tag) and isinstance(b, Text) and isinstance(c, Tag):
if a.is_opening and "\n" not in b.data and c.is_closing and a.tag == c.tag:
return True
return False
def is_inline(prev2: Optional[Token], prev1: Optional[Token], t: Optional[Token], next1: Optional[Token], next2: Optional[Token]) -> bool:
if isinstance(t, Text):
return is_inline_text(prev1, t, next1)
elif isinstance(t, Tag):
if is_inline_text(prev2, prev1, t) or is_inline_text(t, next1, next2):
return True
if isinstance(next1, Tag) and t.is_opening and next1.is_closing and t.tag == next1.tag:
return True # <div></div> (start tag)
if isinstance(prev1, Tag) and prev1.is_opening and t.is_closing and prev1.tag == t.tag:
return True # <div></div> (end tag)
return False
class ElementStack:
"""
Keep track of how deeply nested our document is.
"""
def __init__(self):
self.open_tags = []
self.indent = ""
def push_tag(self, tag: str):
if len(self.open_tags) > 16:
return
self.open_tags.append(tag)
if tag not in NO_INDENT_TAGS:
self.indent += " " * INDENT
def pop_tag(self, tag: str):
if tag in self.open_tags:
remove_indent = 0
while True:
t = self.open_tags.pop()
if t not in NO_INDENT_TAGS:
remove_indent += INDENT
if t == tag:
break
self.indent = self.indent[:-remove_indent]
else:
pass # this closing tag has no start tag. let's keep indentation as-is.
def format_xml(tokens: Iterable[Token]) -> str:
out = io.StringIO()
context = ElementStack()
for prev2, prev1, token, next1, next2 in sliding_window.window(tokens, 2, 2):
if isinstance(token, Tag):
if token.is_opening:
out.write(indent_text(token.data, context.indent))
if not is_inline(prev2, prev1, token, next1, next2):
out.write("\n")
context.push_tag(token.tag)
elif token.is_closing:
context.pop_tag(token.tag)
if is_inline(prev2, prev1, token, next1, next2):
out.write(token.data)
else:
out.write(indent_text(token.data, context.indent))
out.write("\n")
else: # self-closing
out.write(indent_text(token.data, context.indent))
out.write("\n")
elif isinstance(token, Text):
if is_inline(prev2, prev1, token, next1, next2):
out.write(token.text)
else:
out.write(indent_text(token.data, context.indent))
out.write("\n")
else: # pragma: no cover
raise RuntimeError()
return out.getvalue()
class ViewXmlHtml(base.View):
name = "XML/HTML"
__content_types = ("text/xml", "text/html")
def __call__(self, data, **metadata):
# TODO:
# We should really have the message text as str here,
# not the message content as bytes.
# https://github.com/mitmproxy/mitmproxy/issues/1662#issuecomment-266192578
data = data.decode("utf8", "xmlcharrefreplace")
tokens = tokenize(data)
# TODO:
# Performance: Don't render the whole document right away.
# Let's wait with this until we have a sequence-like interface,
# this thing is reasonably fast right now anyway.
pretty = base.format_text(format_xml(tokens))
if "html" in data.lower():
t = "HTML"
else:
t = "XML"
return t, pretty
def render_priority(self, data: bytes, *, content_type: Optional[str] = None, **metadata) -> float:
if content_type in self.__content_types:
return 1
elif strutils.is_xml(data):
return 0.4
return float(content_type in self.__content_types)
| mit | -7,197,868,616,178,917,000 | 29.475207 | 138 | 0.565966 | false |
hitdong/pyvision | src/pyvision/analysis/face.py | 3 | 8265 | # PyVision License
#
# Copyright (c) 2006-2008 David S. Bolme
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither name of copyright holders nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import pyvision as pv
from pyvision.analysis.FaceAnalysis.FaceDetectionTest import is_success
class EyesFile:
'''
Reads and manages the data in an eye coordinate file.
'''
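# Illustration only, inferred from _readEyesFile below: one face per line,
# "<image name> <left eye x> <left eye y> <right eye x> <right eye y>",
# whitespace separated (or comma separated when the filename ends in .csv):
#
#   img_0001 107.3 112.0 148.9 111.4
#   img_0001.jpg,107.3,112.0,148.9,111.4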
def __init__(self,filename):
'''
Inits and reads in the data.
'''
self.filename = filename
self.images = {}
self._readEyesFile()
def files(self):
'''
Returns the list of file names.
'''
names = self.images.keys()
names.sort()
return names
def findFace(self,filename,rect):
fname = self._parseName(filename)
if self.images.has_key(fname):
faces = self.images[fname]
for each in faces:
truth_rect = each[3]
if is_success(truth_rect,rect):
return each
return None
def getFaces(self,filename):
fname = self._parseName(filename)
if self.images.has_key(fname):
faces = self.images[fname]
boxes = []
for _,_,_,box in faces:
boxes.append(box)
return boxes
return []
def getEyes(self,filename):
fname = self._parseName(filename)
if self.images.has_key(fname):
faces = self.images[fname]
eyes = []
for _,left,right,_ in faces:
eyes.append([left,right])
return eyes
return []
def _readEyesFile(self):
'''
Private: Do not call directly. Reads the eye file.
'''
if self.filename[-4:] == '.csv':
f = open(self.filename,'r')
for line in f:
#print line,
line = line.split(',')
fname = self._parseName(line[0])
eye1 = pv.Point(float(line[1]),float(line[2]))
eye2 = pv.Point(float(line[3]),float(line[4]))
truth_rect = pv.BoundingRect(eye1,eye2)
truth_rect.w = 2.0 * truth_rect.w
truth_rect.h = truth_rect.w
truth_rect.x = truth_rect.x - 0.25*truth_rect.w
truth_rect.y = truth_rect.y - 0.3*truth_rect.w
#print fname,eye1,eye2,truth_rect
if not self.images.has_key(fname):
self.images[fname] = []
self.images[fname].append([fname,eye1,eye2,truth_rect])
else:
f = open(self.filename,'r')
for line in f:
#print line,
line = line.split()
fname = self._parseName(line[0])
eye1 = pv.Point(float(line[1]),float(line[2]))
eye2 = pv.Point(float(line[3]),float(line[4]))
truth_rect = pv.BoundingRect(eye1,eye2)
truth_rect.w = 2.0 * truth_rect.w
truth_rect.h = truth_rect.w
truth_rect.x = truth_rect.x - 0.25*truth_rect.w
truth_rect.y = truth_rect.y - 0.3*truth_rect.w
#print fname,eye1,eye2,truth_rect
if not self.images.has_key(fname):
self.images[fname] = []
self.images[fname].append([fname,eye1,eye2,truth_rect])
def _parseName(self,fname):
'''
Private: Do not call directly. Parses the base filename.
'''
fname = os.path.basename(fname)
fname = os.path.splitext(fname)[0]
return fname
class CSU_SRT:
class ImageRecord:
def __init__(self,filename,subject_id,image_id):
self.filename = filename
self.subject_id = subject_id
self.image_id = image_id
def __init__(self,filename):
'''Process a Subject Replicate Table file'''
self.images = []
self.filenames = {}
f = open(filename,'r')
subject_id = 0
image_id = 0
filename = None
for line in f:
images = line.split()
if images:
for image in images:
name = image.split('.')[0]
image_id += 1
print name, image_id, subject_id
ir = CSU_SRT.ImageRecord(name,subject_id,image_id)
self.images.append(ir)
self.filenames[name] = ir
subject_id += 1
self.total_subjects = subject_id
self.total_images = image_id
def getNames(self):
tmp = self.filenames.keys()
tmp.sort()
return tmp;
def getRecord(self,name):
if self.filenames.has_key(name):
return self.filenames[name]
return None
class CSU_Dist:
def __init__(self,directory,srt,extention='.sfi'):
#names = srt.getNames()
self.matrix = {}
self.srt = srt
count = 0
for iname in srt.getNames():
self.matrix[iname] = {}
filename = directory+'/'+iname+extention
print "Reading:",iname
f = open(filename,'r')
for line in f:
jname,dist = line.split()
jname = jname.split('.')[0]
if srt.getRecord(jname):
self.matrix[iname][jname] = -float(dist)
count += 1
print "Read:",count
def getPosNeg(self):
names = self.srt.getNames()
pos = []
neg = []
for i in range(len(names)):
for j in range(i+1,len(names)):
iname = names[i]
jname = names[j]
if self.srt.getRecord(iname).subject_id == self.srt.getRecord(jname).subject_id:
pos.append(self.matrix[iname][jname])
else:
neg.append(self.matrix[iname][jname])
return pos,neg
if __name__ == "__main__":
srt = CSU_SRT("/Users/bolme/vision/csuFaceIdBenchmark/imagelists/list640.srt")
ebgm_dist = CSU_Dist("/Users/bolme/vision/csuFaceIdBenchmark/distances/feret/EBGM",srt)
pca_dist = CSU_Dist("/Users/bolme/vision/csuFaceIdBenchmark/distances/feret/PCA_Euclidean",srt)
ebgm_pos, ebgm_neg = ebgm_dist.getPosNeg()
pca_pos, pca_neg = pca_dist.getPosNeg()
from pyvision.analysis.roc import *
ebgm_roc = pv.ROC(ebgm_pos,ebgm_neg)
pca_roc = pv.ROC(pca_pos,pca_neg)
| bsd-3-clause | -5,918,786,291,849,613,000 | 33.016461 | 100 | 0.538778 | false |
LighthouseHPC/lighthouse | sandbox/lily/django_orthg/dojango/bin/dojobuild.py | 13 | 1261 | #!/usr/bin/env python
# This is the alternate dojo build command so it can be used
# with older versions of django (mainly because of AppEngine, it uses version 0.96)
import os
import sys
from optparse import OptionParser
def setup_environ():
# we assume, that dojango is installed within your django's project dir
project_directory = os.path.abspath(os.path.dirname(__file__)+'/../../')
settings_filename = "settings.py"
if not project_directory:
project_directory = os.getcwd()
project_name = os.path.basename(project_directory)
settings_name = os.path.splitext(settings_filename)[0]
sys.path.append(project_directory)
sys.path.append(os.path.abspath(project_directory + "/.."))
project_module = __import__(project_name, {}, {}, [''])
sys.path.pop()
# Set DJANGO_SETTINGS_MODULE appropriately.
os.environ['DJANGO_SETTINGS_MODULE'] = '%s.%s' % (project_name, settings_name)
return project_directory
project_dir = setup_environ()
from dojango.management.commands.dojobuild import Command
if __name__ == "__main__":
my_build = Command()
parser = OptionParser(option_list=my_build.option_list)
options, args = parser.parse_args(sys.argv)
my_build.handle(*args[1:], **options.__dict__) | mit | 876,172,679,731,151,500 | 39.709677 | 83 | 0.693101 | false |
illicitonion/givabit | lib/sdks/google_appengine_1.7.1/google_appengine/lib/django_0_96/django/db/models/loading.py | 32 | 4520 | "Utilities for loading models and the modules that contain them."
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import sys
import os
__all__ = ('get_apps', 'get_app', 'get_models', 'get_model', 'register_models')
_app_list = [] # Cache of installed apps.
# Entry is not placed in app_list cache until entire app is loaded.
_app_models = {} # Dictionary of models against app label
# Each value is a dictionary of model name: model class
# Applabel and Model entry exists in cache when individual model is loaded.
_app_errors = {} # Dictionary of errors that were experienced when loading the INSTALLED_APPS
# Key is the app_name of the model, value is the exception that was raised
# during model loading.
_loaded = False # Has the contents of settings.INSTALLED_APPS been loaded?
# i.e., has get_apps() been called?
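# For illustration (the app and model names are hypothetical): after loading an
# app "blog" that defines an Article model, the caches would look roughly like
#   _app_list == [<module blog.models>]
#   _app_models == {'blog': {'article': <class blog.models.Article>}}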
def get_apps():
"Returns a list of all installed modules that contain models."
global _app_list
global _loaded
if not _loaded:
_loaded = True
for app_name in settings.INSTALLED_APPS:
try:
load_app(app_name)
except Exception, e:
# Problem importing the app
_app_errors[app_name] = e
return _app_list
def get_app(app_label, emptyOK=False):
"Returns the module containing the models for the given app_label. If the app has no models in it and 'emptyOK' is True, returns None."
get_apps() # Run get_apps() to populate the _app_list cache. Slightly hackish.
for app_name in settings.INSTALLED_APPS:
if app_label == app_name.split('.')[-1]:
mod = load_app(app_name)
if mod is None:
if emptyOK:
return None
else:
return mod
raise ImproperlyConfigured, "App with label %s could not be found" % app_label
def load_app(app_name):
"Loads the app with the provided fully qualified name, and returns the model module."
global _app_list
mod = __import__(app_name, {}, {}, ['models'])
if not hasattr(mod, 'models'):
return None
if mod.models not in _app_list:
_app_list.append(mod.models)
return mod.models
def get_app_errors():
"Returns the map of known problems with the INSTALLED_APPS"
global _app_errors
get_apps() # Run get_apps() to populate the _app_list cache. Slightly hackish.
return _app_errors
def get_models(app_mod=None):
"""
Given a module containing models, returns a list of the models. Otherwise
returns a list of all installed models.
"""
app_list = get_apps() # Run get_apps() to populate the _app_list cache. Slightly hackish.
if app_mod:
return _app_models.get(app_mod.__name__.split('.')[-2], {}).values()
else:
model_list = []
for app_mod in app_list:
model_list.extend(get_models(app_mod))
return model_list
def get_model(app_label, model_name, seed_cache=True):
"""
Returns the model matching the given app_label and case-insensitive
model_name.
Returns None if no model is found.
"""
if seed_cache:
get_apps()
try:
model_dict = _app_models[app_label]
except KeyError:
return None
try:
return model_dict[model_name.lower()]
except KeyError:
return None
def register_models(app_label, *models):
"""
Register a set of models as belonging to an app.
"""
for model in models:
# Store as 'name: model' pair in a dictionary
# in the _app_models dictionary
model_name = model._meta.object_name.lower()
model_dict = _app_models.setdefault(app_label, {})
if model_dict.has_key(model_name):
# The same model may be imported via different paths (e.g.
# appname.models and project.appname.models). We use the source
# filename as a means to detect identity.
fname1 = os.path.abspath(sys.modules[model.__module__].__file__)
fname2 = os.path.abspath(sys.modules[model_dict[model_name].__module__].__file__)
# Since the filename extension could be .py the first time and .pyc
# or .pyo the second time, ignore the extension when comparing.
if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:
continue
model_dict[model_name] = model
| apache-2.0 | -2,887,615,205,250,518,000 | 37.965517 | 139 | 0.620133 | false |
scorphus/politicos | tests/unit/models/test_legislator.py | 1 | 3646 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Marcelo Jorge Vieira <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from preggy import expect
from mock import patch, call
from politicos.models.legislator import Legislator
from politicos.utils import date_to_timestamp
from tests.unit.base import ApiTestCase
from tests.fixtures import LegislatorFactory
class TestLegislator(ApiTestCase):
def test_can_create_legislator(self):
date = datetime.utcnow().date()
legislator = LegislatorFactory.create(
name='Marcelo Jorge Vieira',
picture='http://domain.com/picture.png',
website='http://domain.com/',
email='[email protected]',
gender='M',
date_of_birth=date,
about='Heavy Metal',
)
expect(legislator.id).not_to_be_null()
expect(legislator.name).to_equal('Marcelo Jorge Vieira')
expect(legislator.picture).to_equal('http://domain.com/picture.png')
expect(legislator.website).to_equal('http://domain.com/')
expect(legislator.email).to_equal('[email protected]')
expect(legislator.gender).to_equal('M')
expect(legislator.date_of_birth).to_equal(date)
expect(legislator.about).to_equal('Heavy Metal')
def test_can_convert_to_dict(self):
legislator = LegislatorFactory.create()
legislator_dict = legislator.to_dict()
expect(legislator_dict.keys()).to_length(7)
expect(legislator_dict.keys()).to_be_like([
'name', 'picture', 'website', 'email', 'gender',
'date_of_birth', 'about',
])
date_of_birth = date_to_timestamp(legislator.date_of_birth)
expect(legislator_dict['name']).to_equal(legislator.name)
expect(legislator_dict['picture']).to_equal(legislator.picture)
expect(legislator_dict['website']).to_equal(legislator.website)
expect(legislator_dict['email']).to_equal(legislator.email)
expect(legislator_dict['gender']).to_equal(legislator.gender)
expect(legislator_dict['date_of_birth']).to_equal(date_of_birth)
expect(legislator_dict['about']).to_equal(legislator.about)
def test_can_convert_to_dict_with_none_date_of_birth(self):
legislator = LegislatorFactory.create()
legislator.date_of_birth = None
legislator_dict = legislator.to_dict()
expect(legislator_dict['date_of_birth']).to_be_null()
@patch('politicos.models.legislator.logging')
def test_can_add_legislator(self, logging_mock):
date_of_birth = date_to_timestamp(datetime.utcnow().date())
data = {'name': 'Marcelo Jorge Vieira', 'date_of_birth': date_of_birth}
legislator = Legislator.add_legislator(self.db, data)
expect(legislator.name).to_equal('Marcelo Jorge Vieira')
expect(logging_mock.mock_calls).to_include(
call.debug('Added legislator: "%s"', 'Marcelo Jorge Vieira')
)
| agpl-3.0 | -8,097,954,450,932,880,000 | 39.511111 | 79 | 0.673066 | false |
novaspirit/eloipool | agplcompliance.py | 6 | 3204 | # Eloipool - Python Bitcoin pool server
# Copyright (C) 2012-2014 Luke Dashjr <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import stat
import traceback
# It is not legal to bypass or lie to this check. See LICENSE file for details.
try:
_srcdir = os.path.dirname(os.path.abspath(__file__))
if os.path.exists(_srcdir + '/.I_swear_that_I_am_Luke_Dashjr'):
_SourceFiles = None
else:
_SourceFiles = os.popen('cd \'%s\' && git ls-files' % (_srcdir,)).read().split('\n')
try:
_SourceFiles.remove('')
except ValueError:
pass
if len(_SourceFiles) < 2:
raise RuntimeError('Unknown error')
_SourceFiles = tuple(x.encode('utf8') for x in _SourceFiles)
_GitDesc = os.popen('cd \'%s\' && git describe --dirty --always' % (_srcdir,)).read().strip().encode('utf8')
except BaseException as e:
logging.getLogger('Licensing').critical('Error getting list of source files! AGPL requires this. To fix, be sure you are using git for Eloipool.\n' + traceback.format_exc())
import sys
sys.exit(1)
# For AGPL compliance, allow direct downloads of source code
def get_source(p):
if _SourceFiles is None:
return None
if p == b'':
# List of files
body = b'<html><head><title>Source Code</title></head><body>\t\n'
body += b'\t<a href="tar">(tar archive of all files)</a><br><br>\n'
for f in _SourceFiles:
body += b'\t<a href="' + f + b'">\n' + f + b'\n\t</a><br>\n'
body += b'\t</body></html>\n'
return ('text/html', body)
if p == b'tar':
body = bytearray()
dn = b'eloipool-' + _GitDesc + b'/'
for f in _SourceFiles:
fs = f.decode('utf8')
fstat = os.lstat(fs)
islink = stat.S_ISLNK(fstat.st_mode)
if islink:
data = b''
link = os.readlink(f)
else:
with open("%s/%s" % (_srcdir, fs), 'rb') as ff:
data = ff.read()
link = b''
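# The next few lines hand-roll a 512-byte ustar-style tar header: file name
# padded to 100 bytes, mode as 7 octal digits plus NUL, 16 zero bytes for
# uid/gid, size and mtime as 12-digit octal fields, an 8-space checksum field
# that is back-filled at offset 148 below, a one-byte type flag ('0' regular
# file, '2' symlink) and the link target padded so the header totals 512 bytes.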
h = bytearray()
f = dn + f
h += f + bytes(max(0, 100 - len(f)))
h += ('%07o' % (fstat.st_mode,)[-7:]).encode('utf8') + b'\0'
h += bytes(16)
h += ('%012o%012o' % (fstat.st_size, fstat.st_mtime)).encode('utf8')
h += b' ' # chksum
h += b'2' if islink else b'0'
h += link + bytes(max(0, 355 - len(link)))
h[148:156] = ('%07o' % (sum(h),)).encode('utf8') + b'\0'
body += h + data + bytes(512 - ((fstat.st_size % 512) or 512))
return ('application/x-tar', body)
if p not in _SourceFiles:
return None
ct = 'text/plain'
if p[-3:] == b'.py': ct = 'application/x-python'
elif p[-11:] == b'.py.example': ct = 'application/x-python'
p = p.decode('utf8')
with open("%s/%s" % (_srcdir, p), 'rb') as f:
return (ct, f.read())
| agpl-3.0 | -5,041,123,365,346,520,000 | 36.255814 | 174 | 0.633895 | false |
nive/nive | nive/extensions/tests/test_persistence.py | 1 | 1531 | # -*-coding:utf-8 -*-
import unittest
from nive.extensions.persistence import *
from nive.definitions import Conf
from nive.helper import FormatConfTestFailure
from nive.tests import db_app
from nive.tests import __local
class Persistence(unittest.TestCase):
def setUp(self):
self.app = db_app.app_nodb()
def tearDown(self):
self.app.Close()
def test_1(self):
p = PersistentConf(self.app, self.app.configuration)
def test_2(self):
LoadStoredConfValues(self.app, None)
class tdbPersistence(__local.DefaultTestCase):
def setUp(self):
self._loadApp(["nive.extensions.persistence.dbPersistenceConfiguration"])
def tearDown(self):
self._closeApp()
def test_conf(self):
r=dbPersistenceConfiguration.test()
if not r:
return
self.fail(FormatConfTestFailure(r))
def test_storedconf(self):
storage = self.app.NewModule(IModuleConf, "persistence")
self.assertTrue(storage)
LoadStoredConfValues(self.app, None)
def test_load(self):
storage = self.app.NewModule(IModuleConf, "persistence")
self.assertTrue(storage)
storage(self.app, Conf(id="test")).Save({"title":"öäüß", "something": 123})
values = Conf(id="test")
storage(self.app, values).Load()
self.assertTrue(values["something"] == 123)
self.assertTrue(values["title"] == "öäüß") | gpl-3.0 | -624,896,616,748,852,700 | 23.190476 | 83 | 0.617859 | false |
deter-project/magi | scripts/magi_generate_group_aal.py | 1 | 2763 | #!/usr/bin/env python
import sys
import yaml
import subprocess
import re # great, now i've got two problems.
import time
from os.path import basename
from collections import defaultdict
from optparse import OptionParser
if __name__ == '__main__':
script_name = basename(sys.argv[0])
usage = 'Usage: %s [options] group=regex ... group=regex' % script_name
usage += ('\n\n\tWhere "group" is the group name added to the AAL '
'\n\tand "regex" is the regular expression applied '
'\n\tto all node names in the experiment.'
'\n\n\tFor example:\n\n\t\t> %s -e deter,myExp myFoo=\'^foo.*\''
'\n\n\twould create an AAL with a "myFoo" group which contains all nodes '
'\n\tin the experiment deter,myExp whose names start with foo.'
'\n\n\tThe script always creates a group "all_nodes" which contains'
'\n\tall nodes in the experiment. The AAL is written to stdout.' % script_name)
parser = OptionParser(usage=usage)
parser.add_option('-e', '--experiment',
help='The experiment to generate AAL from. Must be of the form GROUP,EXPERIMENT. '
'i.e. DETER,myExperiment')
parser.add_option('-c', '--container', action='store_true', help='This experiment has containers.')
(opts, args) = parser.parse_args()
if not opts.experiment:
print 'Required argument --experiment (or -e) missing.'
sys.exit(1)
container = '-c' if opts.container else ''
list_node_cmd = ('/usr/testbed/bin/script_wrapper.py node_list '
'-v -e %s %s' % (opts.experiment, container))
try:
proc = subprocess.Popen(list_node_cmd.split(), stdout=subprocess.PIPE)
proc.wait()
if proc.returncode != 0:
print 'Error getting experiment nodes. Is it swapped in?'
sys.exit(1)
node_list = [node for node in proc.communicate()[0].split()]
except Exception as e:
print 'Error generating node list in experiment %s: %s' % (opts.experiment, e)
sys.exit(1)
aal_group_map = defaultdict(str)
aal_group_map['all_nodes'] = '.*'
for arg in args:
if -1 != arg.find('='):
key, value = arg.split('=')
aal_group_map[key] = value
aal = defaultdict(list)
for group, regex in aal_group_map.iteritems():
for node_name in node_list:
m = re.search(regex, node_name)
if m:
aal[group].append(node_name)
print '#'
print '# AAL generated by %s at %s' % (basename(sys.argv[0]), time.asctime(time.localtime()))
print '# from experiment %s' % opts.experiment
print '#'
print yaml.dump({'groups': dict(aal)})
| gpl-2.0 | -8,532,282,157,791,943,000 | 38.471429 | 104 | 0.593558 | false |
foxmask/django-th | th_evernote/tests.py | 1 | 6939 | # coding: utf-8
from django.conf import settings
from django.core.cache import caches
from django_th.tests.test_main import MainTest
from evernote.api.client import EvernoteClient
from th_evernote.models import Evernote
from th_evernote.forms import EvernoteProviderForm, EvernoteConsumerForm
from th_evernote.my_evernote import ServiceEvernote
from th_evernote.evernote_mgr import EvernoteMgr
from th_evernote.sanitize import sanitize
from unittest.mock import patch
try:
from unittest import mock
except ImportError:
import mock
cache = caches['django_th']
class EvernoteTest(MainTest):
"""
EvernoteTest Model
"""
def create_evernote(self):
trigger = self.create_triggerservice(consumer_name='ServiceEvernote')
tag = 'test'
notebook = 'my notebook'
title = 'a new note'
status = True
return Evernote.objects.create(tag=tag, title=title,
notebook=notebook, trigger=trigger,
status=status)
class EvernoteView(EvernoteTest):
def test_evernote(self):
ev = self.create_evernote()
self.assertIsInstance(ev, Evernote)
self.assertEqual(ev.show(), "My Evernote {}".format(ev.title))
self.assertEqual(ev.__str__(), ev.title)
"""
Form
"""
# provider
def test_valid_provider_form(self):
ev = self.create_evernote()
data = {'tag': ev.tag, 'notebook': ev.notebook}
form = EvernoteProviderForm(data=data)
self.assertTrue(form.is_valid())
def test_invalid_provider_form(self):
form = EvernoteProviderForm(data={})
self.assertFalse(form.is_valid())
# consumer
def test_valid_consumer_form(self):
ev = self.create_evernote()
data = {'tag': ev.tag, 'notebook': ev.notebook}
form = EvernoteConsumerForm(data=data)
self.assertTrue(form.is_valid())
def test_invalid_consumer_form(self):
form = EvernoteConsumerForm(data={})
self.assertFalse(form.is_valid())
def test_get_config_th(self):
"""
does this settings exists ?
"""
self.assertTrue(settings.TH_EVERNOTE_KEY)
def test_get_services_list(self):
th_service = ('th_evernote.my_evernote.ServiceEvernote',)
for service in th_service:
self.assertIn(service, settings.TH_SERVICES)
class ServiceEvernoteTest(EvernoteTest):
"""
ServiceEvernoteTest
"""
def setUp(self):
super(ServiceEvernoteTest, self).setUp()
self.ev = self.create_evernote()
self.data = {'link': 'http://foo.bar/some/thing/else/what/else',
'title': 'what else',
'content': 'A nice content with a nice '
'<a href="http://domain.tld">foobar link</a>',
'summary_detail': 'summary foobar',
'description': 'description foobar'}
self.token = 'AZERTY123'
self.trigger_id = 1
self.service = ServiceEvernote(self.token)
def test_read_data(self):
kwargs = dict({'date_triggered': '2013-05-11 13:23:58+00:00',
'trigger_id': self.trigger_id})
with patch.object(EvernoteClient, 'get_note_store') as mock_ev:
se = ServiceEvernote(self.token)
se.read_data(**kwargs)
mock_ev.assert_called_once_with()
@patch.object(EvernoteClient, 'get_note_store')
# @patch.object(EvernoteMgr, 'create_note')
# def test_save_data(self, mock1, mock2):
def test_save_data(self, mock1):
self.assertTrue(self.token)
self.assertIsInstance(self.trigger_id, int)
self.assertIn('content', self.data)
self.assertIn('summary_detail', self.data)
self.assertIn('description', self.data)
self.assertIn('title', self.data)
self.assertIsNotNone(self.data['link'])
self.assertNotEqual(self.data['title'], '')
self.assertIn('sandbox', settings.TH_EVERNOTE_KEY)
se = ServiceEvernote(self.token)
se.save_data(self.trigger_id, **self.data)
mock1.assert_called_once()
# mock2.assert_called_once()
def test_get_config_th(self):
"""
does this settings exists ?
"""
self.assertTrue(settings.TH_EVERNOTE_KEY)
self.assertIn('consumer_key', settings.TH_EVERNOTE_KEY)
self.assertIn('consumer_secret', settings.TH_EVERNOTE_KEY)
self.assertIn('sandbox', settings.TH_EVERNOTE_KEY)
def test_get_evernote_client(self, token=None):
"""
get the token from evernote
"""
sandbox = settings.TH_EVERNOTE_KEY['sandbox']
client = mock.Mock(return_value=True)
client.method(token=token, sandbox=sandbox)
client.method.assert_called_with(token=token, sandbox=sandbox)
sandbox = settings.TH_EVERNOTE_KEY['sandbox']
consumer_key = settings.TH_EVERNOTE_KEY['consumer_key']
consumer_secret = settings.TH_EVERNOTE_KEY['consumer_secret']
client = mock.Mock(return_value=True)
client.method(consumer_key=consumer_key,
consumer_secret=consumer_secret, sandbox=sandbox)
client.method.assert_called_with(consumer_key=consumer_key,
consumer_secret=consumer_secret,
sandbox=sandbox)
def test_auth(self):
pass
def test_callback(self):
pass
def test_sanitize(self):
html = "<html><body>" \
"<p>coucou</p>" \
"<dir>foobar</dir>" \
"<div data-foobar='nothing'>foobar2</div>" \
"<a href='ftp://localhost'>dropped</a>" \
"<a href='http://localhost'>kept</a>" \
"</body></html>"
html = sanitize(html)
self.assertTrue("dir" not in html)
self.assertTrue("ftp" not in html)
self.assertTrue("data-foobar" not in html)
self.assertTrue("http" in html)
def test_set_header(self):
header = EvernoteMgr.set_header()
self.assertTrue('DOCTYPE en-note SYSTEM "http://xml.evernote.'
'com/pub/enml2.dtd' in header)
def test_footer(self):
content = ''
se = ServiceEvernote(self.token)
footer = se._footer(self.ev, self.data, content)
self.assertIsInstance(footer, str)
def test_cleaning_content(self):
se = ServiceEvernote(self.token)
self.assertIsInstance(se._cleaning_content('foobar'), str)
def test_set_note_attribute(self):
data = {}
attr = EvernoteMgr.set_note_attribute(data)
self.assertTrue(type(attr) is bool)
data = {'link': 'http://localhost'}
attr = EvernoteMgr.set_note_attribute(data)
self.assertTrue(type(attr) is not bool)
| bsd-3-clause | 5,888,162,812,482,918,000 | 32.684466 | 78 | 0.601095 | false |
asmacdo/pulp-automation | tests/general_tests/test_02_repo_applicability.py | 2 | 1154 | import json, unittest
from tests import pulp_test
from pulp_auto.repo import RepoAppl
from pulp_auto.task import Task
def setUpModule():
pass
class RepoApplicabilty(pulp_test.PulpTest):
def test_01_repo_content_applicability(self):
response = RepoAppl.applicability(self.pulp, data={
"repo_criteria": {"filters": {"id":{"$in":["test-repo", "test-errata"]}}}
}
)
self.assertPulp(code=202)
Task.wait_for_report(self.pulp, response)
# TODO: assert applicability tags in task response
# TODO: assert the applicability applies OK :) or is sane
def test_02_repo_content_applicability_invalid_param(self):
#response_code: 400,if one or more of the parameters is invalid
RepoAppl.applicability(self.pulp, data={
"invalid_parameter": {"filters": {"id":{"$in":["test-repo", "test-errata"]}}}
}
)
self.assertPulp(code=400)
| gpl-2.0 | 5,235,372,818,075,275,000 | 38.793103 | 118 | 0.525997 | false |
vkosuri/ChatterBot | tests/storage/test_sql_adapter.py | 2 | 15201 | from unittest import TestCase
from chatterbot.conversation import Statement
from chatterbot.storage.sql_storage import SQLStorageAdapter
class SQLStorageAdapterTestCase(TestCase):
@classmethod
def setUpClass(cls):
"""
Instantiate the adapter before any tests in the test case run.
"""
cls.adapter = SQLStorageAdapter(database_uri=None)
def tearDown(self):
"""
Drop the tables in the database after each test is run.
"""
self.adapter.drop()
class SQLStorageAdapterTests(SQLStorageAdapterTestCase):
def test_set_database_uri_none(self):
adapter = SQLStorageAdapter(database_uri=None)
self.assertEqual(adapter.database_uri, 'sqlite://')
def test_set_database_uri(self):
adapter = SQLStorageAdapter(database_uri='sqlite:///db.sqlite3')
self.assertEqual(adapter.database_uri, 'sqlite:///db.sqlite3')
def test_count_returns_zero(self):
"""
The count method should return a value of 0
when nothing has been saved to the database.
"""
self.assertEqual(self.adapter.count(), 0)
def test_count_returns_value(self):
"""
The count method should return a value of 1
when one item has been saved to the database.
"""
self.adapter.create(text="Test statement")
self.assertEqual(self.adapter.count(), 1)
def test_filter_text_statement_not_found(self):
"""
Test that None is returned by the find method
when a matching statement is not found.
"""
results = list(self.adapter.filter(text="Non-existant"))
self.assertEqual(len(results), 0)
def test_filter_text_statement_found(self):
"""
Test that a matching statement is returned
when it exists in the database.
"""
text = "New statement"
self.adapter.create(text=text)
results = list(self.adapter.filter(text=text))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, text)
def test_update_adds_new_statement(self):
statement = Statement(text="New statement")
self.adapter.update(statement)
results = list(self.adapter.filter(text="New statement"))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, statement.text)
def test_update_modifies_existing_statement(self):
statement = Statement(text="New statement")
self.adapter.update(statement)
# Check the initial values
results = list(self.adapter.filter(text=statement.text))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].in_response_to, None)
# Update the statement value
statement.in_response_to = "New response"
self.adapter.update(statement)
# Check that the values have changed
results = list(self.adapter.filter(text=statement.text))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].in_response_to, "New response")
def test_get_random_returns_statement(self):
self.adapter.create(text="New statement")
random_statement = self.adapter.get_random()
self.assertEqual(random_statement.text, "New statement")
def test_get_random_no_data(self):
from chatterbot.storage import StorageAdapter
with self.assertRaises(StorageAdapter.EmptyDatabaseException):
self.adapter.get_random()
def test_remove(self):
text = "Sometimes you have to run before you can walk."
self.adapter.create(text=text)
self.adapter.remove(text)
results = self.adapter.filter(text=text)
self.assertEqual(list(results), [])
class SQLStorageAdapterFilterTests(SQLStorageAdapterTestCase):
def test_filter_text_no_matches(self):
self.adapter.create(
text='Testing...',
in_response_to='Why are you counting?'
)
results = list(self.adapter.filter(text="Howdy"))
self.assertEqual(len(results), 0)
def test_filter_in_response_to_no_matches(self):
self.adapter.create(
text='Testing...',
in_response_to='Why are you counting?'
)
results = list(self.adapter.filter(in_response_to="Maybe"))
self.assertEqual(len(results), 0)
def test_filter_equal_results(self):
statement1 = Statement(
text="Testing...",
in_response_to=None
)
statement2 = Statement(
text="Testing one, two, three.",
in_response_to=None
)
self.adapter.update(statement1)
self.adapter.update(statement2)
results = list(self.adapter.filter(in_response_to=None))
results_text = [
result.text for result in results
]
self.assertEqual(len(results), 2)
self.assertIn(statement1.text, results_text)
self.assertIn(statement2.text, results_text)
def test_filter_no_parameters(self):
"""
If no parameters are passed to the filter,
then all statements should be returned.
"""
self.adapter.create(text="Testing...")
self.adapter.create(text="Testing one, two, three.")
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
def test_filter_by_tag(self):
self.adapter.create(text="Hello!", tags=["greeting", "salutation"])
self.adapter.create(text="Hi everyone!", tags=["greeting", "exclamation"])
self.adapter.create(text="The air contains Oxygen.", tags=["fact"])
results = self.adapter.filter(tags=["greeting"])
results_text_list = [statement.text for statement in results]
self.assertEqual(len(results_text_list), 2)
self.assertIn("Hello!", results_text_list)
self.assertIn("Hi everyone!", results_text_list)
def test_filter_by_tags(self):
self.adapter.create(text="Hello!", tags=["greeting", "salutation"])
self.adapter.create(text="Hi everyone!", tags=["greeting", "exclamation"])
self.adapter.create(text="The air contains Oxygen.", tags=["fact"])
results = self.adapter.filter(
tags=["exclamation", "fact"]
)
results_text_list = [statement.text for statement in results]
self.assertEqual(len(results_text_list), 2)
self.assertIn("Hi everyone!", results_text_list)
self.assertIn("The air contains Oxygen.", results_text_list)
def test_filter_page_size(self):
self.adapter.create(text='A')
self.adapter.create(text='B')
self.adapter.create(text='C')
results = self.adapter.filter(page_size=2)
results_text_list = [statement.text for statement in results]
self.assertEqual(len(results_text_list), 3)
self.assertIn('A', results_text_list)
self.assertIn('B', results_text_list)
self.assertIn('C', results_text_list)
def test_exclude_text(self):
self.adapter.create(text='Hello!')
self.adapter.create(text='Hi everyone!')
results = list(self.adapter.filter(
exclude_text=[
'Hello!'
]
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'Hi everyone!')
def test_exclude_text_words(self):
self.adapter.create(text='This is a good example.')
self.adapter.create(text='This is a bad example.')
self.adapter.create(text='This is a worse example.')
results = list(self.adapter.filter(
exclude_text_words=[
'bad', 'worse'
]
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'This is a good example.')
def test_persona_not_startswith(self):
self.adapter.create(text='Hello!', persona='bot:tester')
self.adapter.create(text='Hi everyone!', persona='user:person')
results = list(self.adapter.filter(
persona_not_startswith='bot:'
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'Hi everyone!')
def test_search_text_contains(self):
self.adapter.create(text='Hello!', search_text='hello exclamation')
self.adapter.create(text='Hi everyone!', search_text='hi everyone')
results = list(self.adapter.filter(
search_text_contains='everyone'
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'Hi everyone!')
def test_search_text_contains_multiple_matches(self):
self.adapter.create(text='Hello!', search_text='hello exclamation')
self.adapter.create(text='Hi everyone!', search_text='hi everyone')
results = list(self.adapter.filter(
search_text_contains='hello everyone'
))
self.assertEqual(len(results), 2)
class SQLOrderingTests(SQLStorageAdapterTestCase):
"""
Test cases for the ordering of sets of statements.
"""
def test_order_by_text(self):
statement_a = Statement(text='A is the first letter of the alphabet.')
statement_b = Statement(text='B is the second letter of the alphabet.')
self.adapter.update(statement_b)
self.adapter.update(statement_a)
results = list(self.adapter.filter(order_by=['text']))
self.assertEqual(len(results), 2)
self.assertEqual(statement_a.text, results[0].text)
self.assertEqual(statement_b.text, results[1].text)
def test_order_by_created_at(self):
from datetime import datetime, timedelta
today = datetime.now()
yesterday = datetime.now() - timedelta(days=1)
statement_a = Statement(
text='A is the first letter of the alphabet.',
created_at=yesterday
)
statement_b = Statement(
text='B is the second letter of the alphabet.',
created_at=today
)
self.adapter.update(statement_b)
self.adapter.update(statement_a)
results = list(self.adapter.filter(order_by=['created_at']))
self.assertEqual(len(results), 2)
self.assertEqual(statement_a.text, results[0].text)
self.assertEqual(statement_b.text, results[1].text)
class StorageAdapterCreateTests(SQLStorageAdapterTestCase):
"""
Tests for the create function of the storage adapter.
"""
def test_create_text(self):
self.adapter.create(text='testing')
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'testing')
def test_create_search_text(self):
self.adapter.create(
text='testing',
search_text='test'
)
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].search_text, 'test')
def test_create_search_in_response_to(self):
self.adapter.create(
text='testing',
search_in_response_to='test'
)
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].search_in_response_to, 'test')
def test_create_tags(self):
self.adapter.create(text='testing', tags=['a', 'b'])
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertIn('a', results[0].get_tags())
self.assertIn('b', results[0].get_tags())
def test_create_duplicate_tags(self):
"""
The storage adapter should not create a statement with tags
that are duplicates.
"""
self.adapter.create(text='testing', tags=['ab', 'ab'])
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(len(results[0].get_tags()), 1)
self.assertEqual(results[0].get_tags(), ['ab'])
def test_create_many_text(self):
self.adapter.create_many([
Statement(text='A'),
Statement(text='B')
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
self.assertEqual(results[0].text, 'A')
self.assertEqual(results[1].text, 'B')
def test_create_many_search_text(self):
self.adapter.create_many([
Statement(text='A', search_text='a'),
Statement(text='B', search_text='b')
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
self.assertEqual(results[0].search_text, 'a')
self.assertEqual(results[1].search_text, 'b')
def test_create_many_search_in_response_to(self):
self.adapter.create_many([
Statement(text='A', search_in_response_to='a'),
Statement(text='B', search_in_response_to='b')
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
self.assertEqual(results[0].search_in_response_to, 'a')
self.assertEqual(results[1].search_in_response_to, 'b')
def test_create_many_tags(self):
self.adapter.create_many([
Statement(text='A', tags=['first', 'letter']),
Statement(text='B', tags=['second', 'letter'])
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
self.assertIn('letter', results[0].get_tags())
self.assertIn('letter', results[1].get_tags())
self.assertIn('first', results[0].get_tags())
self.assertIn('second', results[1].get_tags())
def test_create_many_duplicate_tags(self):
"""
The storage adapter should not create a statement with tags
that are duplicates.
"""
self.adapter.create_many([
Statement(text='testing', tags=['ab', 'ab'])
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(len(results[0].get_tags()), 1)
self.assertEqual(results[0].get_tags(), ['ab'])
class StorageAdapterUpdateTests(SQLStorageAdapterTestCase):
"""
Tests for the update function of the storage adapter.
"""
def test_update_adds_tags(self):
statement = self.adapter.create(text='Testing')
statement.add_tags('a', 'b')
self.adapter.update(statement)
statements = list(self.adapter.filter())
self.assertEqual(len(statements), 1)
self.assertIn('a', statements[0].get_tags())
self.assertIn('b', statements[0].get_tags())
def test_update_duplicate_tags(self):
"""
The storage adapter should not update a statement with tags
that are duplicates.
"""
statement = self.adapter.create(text='Testing', tags=['ab'])
statement.add_tags('ab')
self.adapter.update(statement)
statements = list(self.adapter.filter())
self.assertEqual(len(statements), 1)
self.assertEqual(len(statements[0].get_tags()), 1)
self.assertEqual(statements[0].get_tags(), ['ab'])
| bsd-3-clause | 6,196,846,224,603,339,000 | 31.550321 | 82 | 0.616144 | false |
RincewindWizzard/gruppenkasse-gtk | src/stores.py | 1 | 2859 | #!/usr/bin/python3
# -*- encoding: utf-8 -*-
from gi.repository import Gtk, GObject
#class ListStoreFilter(Gtk.ListStore):
# def __init__(self, filter_func=None, modify_func=None, *args):
class SQLStore(Gtk.ListStore):
"""
Fills a Gtk.ListStore with data from a SQL query every time you call update
"""
def __init__(self, session, query, data_func, *types):
"""
data_func(obj) is a function that takes an entry from the SQL table and returns a tuple which has to contain the types given in types
IMPORTANT: I implicitly assume that obj has an id entry of type int which is the full primary key of the row; it is added to the result of data_func and used to update the row in the liststore
"""
self._data_func = data_func
self.session = session
Gtk.ListStore.__init__(self, int, *types)
self.query = query
@property
def query(self):
return self._query
@query.setter
def query(self, q):
self._query = q
self.flush()
@property
def data_func(self):
return self._data_func
@data_func.setter
def data_func(self, func):
self._data_func = func
self.flush()
def flush(self):
""" Clears all data and populates the store from the query """
if self._data_func and self.query:
self.clear()
for obj in self.query.all():
self.add_row(obj)
def remove(self, index):
row = self[index]
self.query.filter_by(id=row[0]).delete() # deletes from undelying model
self.session.commit()
Gtk.ListStore.remove(self, index)
def append_object(self, obj):
self.session.add(obj)
self.session.commit()
index = self.add_row(obj)
return index
def add_row(self, obj):
row = (obj.id, ) + self.data_func(obj)
return self.append(row)
def update_row(self, index):
try:
index = self.get_iter(index)
if self.iter_is_valid(index):
row = self[index]
obj = self.query.filter_by(id=row[0]).first()
if obj:
new_row = self.data_func(obj)
for j, val in enumerate(new_row):
row[j + 1] = val
else:
self.remove(index)
except ValueError as e:
...
def update(self, *indices):
if self._data_func and self.query:
if len(indices) == 0:
for index, row in enumerate(self):
self.update_row(index)
else:
for index in indices:
self.update_row(index)
def get_object(self, index):
row = self[index]
return self.query.filter_by(id=row[0]).first()
| lgpl-3.0 | -7,831,182,648,781,740,000 | 29.414894 | 198 | 0.551242 | false |
yannrouillard/weboob | modules/poivy/browser.py | 2 | 2888 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Fourcot Florent
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword, BrowserBanned
from .pages import HomePage, LoginPage, HistoryPage, BillsPage, ErrorPage
__all__ = ['PoivyBrowser']
class PoivyBrowser(BaseBrowser):
DOMAIN = 'www.poivy.com'
PROTOCOL = 'https'
ENCODING = None # refer to the HTML encoding
PAGES = {'.*login': LoginPage,
'.*buy_credit.*': HomePage,
'.*/recent_calls': HistoryPage,
'.*purchases': BillsPage,
'.*warning.*': ErrorPage
}
def __init__(self, *args, **kwargs):
BaseBrowser.__init__(self, *args, **kwargs)
def home(self):
self.location('/login')
def is_logged(self):
return not self.is_on_page(LoginPage)
def login(self):
assert isinstance(self.username, basestring)
assert isinstance(self.password, basestring)
if not self.is_on_page(LoginPage):
self.location('/login')
if not self.page.login(self.username, self.password):
raise BrowserBanned('Too many connections from your IP address: captcha enabled')
if self.is_on_page(LoginPage) or self.is_on_page(ErrorPage):
raise BrowserIncorrectPassword()
def get_subscription_list(self):
if not self.is_on_page(HomePage):
self.location('/buy_credit')
return self.page.get_list()
def get_subscription(self, id):
assert isinstance(id, basestring)
l = self.get_subscription_list()
for a in l:
if a.id == id:
return a
return None
def get_history(self):
if not self.is_on_page(HistoryPage):
self.location('/recent_calls')
return self.page.get_calls()
def iter_bills(self, parentid):
if not self.is_on_page(BillsPage):
self.location('/purchases')
return self.page.date_bills()
def get_bill(self, id):
assert isinstance(id, basestring)
l = self.iter_bills(id)
for a in l:
if a.id == id:
return a
| agpl-3.0 | 6,548,889,652,903,258,000 | 30.391304 | 92 | 0.618767 | false |
pmghalvorsen/gramps_branch | gramps/gen/filters/rules/family/_memberbase.py | 3 | 1917 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Set of wrappers for family filter rules based on personal rules.
Any rule that matches a family based on a personal rule applied
to the father, the mother, or any child just needs to do two things:
> Set the class attribute 'base_class' to the personal rule
> Set apply method to be an appropriate wrapper below
Example:
in the class body, outside any method:
> base_class = SearchName
> apply = child_base
"""
def father_base(self,db,family):
father_handle = family.get_father_handle()
father = db.get_person_from_handle(father_handle)
if father:
return self.base_class.apply(self,db,father)
else:
return False
def mother_base(self,db,family):
mother_handle = family.get_mother_handle()
mother = db.get_person_from_handle(mother_handle)
if mother:
return self.base_class.apply(self,db,mother)
else:
return False
def child_base(self,db,family):
for child_ref in family.get_child_ref_list():
child = db.get_person_from_handle(child_ref.ref)
if self.base_class.apply(self,db,child):
return True
return False
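# --- Illustrative sketch (hypothetical; not part of the Gramps sources) ---
# The module docstring shows the wiring: a family rule subclasses a personal
# rule, records it in ``base_class`` and points ``apply`` at one of the
# wrappers above. ``_StubNameRule`` stands in for a real personal rule such as
# SearchName, and its surname check is only an assumption made for this demo.
class _StubNameRule(object):
    """Minimal stand-in for a personal rule: the person's surname is 'Smith'."""
    def apply(self, db, person):
        return person.get_primary_name().get_surname() == 'Smith'

class _FatherIsSmith(_StubNameRule):
    """Hypothetical family rule: matches when the father passes the name rule."""
    base_class = _StubNameRule
    apply = father_base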
| gpl-2.0 | 871,916,708,985,938,700 | 33.854545 | 79 | 0.719875 | false |
Princu7/open-event-orga-server | app/models/speaker.py | 7 | 5370 | from app.helpers.versioning import clean_up_string, clean_html
from app.models import db
from app.helpers.helpers import ensure_social_link
class Speaker(db.Model):
"""Speaker model class"""
__tablename__ = 'speaker'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
photo = db.Column(db.String)
thumbnail = db.Column(db.String)
small = db.Column(db.String)
icon = db.Column(db.String)
short_biography = db.Column(db.Text)
long_biography = db.Column(db.Text)
speaking_experience = db.Column(db.Text)
email = db.Column(db.String, nullable=False)
mobile = db.Column(db.String)
website = db.Column(db.String)
twitter = db.Column(db.String)
facebook = db.Column(db.String)
github = db.Column(db.String)
linkedin = db.Column(db.String)
organisation = db.Column(db.String)
featured = db.Column(db.Boolean, default=False)
position = db.Column(db.String)
country = db.Column(db.String)
city = db.Column(db.String)
gender = db.Column(db.String)
heard_from = db.Column(db.String)
sponsorship_required = db.Column(db.Text)
event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'))
user_id = db.Column(db.Integer, db.ForeignKey('user.id', ondelete='SET NULL'))
user = db.relationship('User', backref='speakers')
def __init__(self,
name=None,
photo=None,
thumbnail=None,
small=None,
icon=None,
short_biography=None,
long_biography=None,
speaking_experience=None,
email=None,
mobile=None,
website=None,
twitter=None,
facebook=None,
github=None,
linkedin=None,
organisation=None,
featured=False,
position=None,
country=None,
city=None,
gender=None,
heard_from=None,
sponsorship_required=None,
event_id=None,
user=None):
self.name = name
self.photo = photo
self.thumbnail = thumbnail
self.small = small
self.icon = icon
self.short_biography = short_biography
self.long_biography = long_biography
self.speaking_experience = speaking_experience
self.email = email
self.mobile = mobile
self.website = website
self.twitter = twitter
self.facebook = facebook
self.github = github
self.linkedin = linkedin
self.featured = featured
self.organisation = organisation
self.position = position
self.country = country
self.city = city
self.gender = gender
self.heard_from = heard_from
self.sponsorship_required = sponsorship_required
self.event_id = event_id
# ensure links are in social fields
self.ensure_social_links()
self.user = user
@staticmethod
def get_service_name():
return 'speaker'
def __repr__(self):
return '<Speaker %r>' % self.name
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.name
def __setattr__(self, name, value):
if name == 'short_biography' or name == 'long_biography' or name == 'speaking_experience' or name == 'sponsorship_required':
super(Speaker, self).__setattr__(name, clean_html(clean_up_string(value)))
else:
super(Speaker, self).__setattr__(name, value)
@property
    def serialize(self):
        """Return object data in easily serializable format"""
session_data = [{'title': session.title, 'id': session.id}
for session in self.sessions]
return {
'id': self.id,
'name': self.name,
'photo': self.photo,
'thumbnail': self.thumbnail,
'small': self.small,
'icon': self.icon,
'short_biography': self.short_biography,
'long_biography': self.long_biography,
'speaking_experience': self.speaking_experience,
'email': self.email,
'mobile': self.mobile,
'website': self.website,
'twitter': self.twitter,
'facebook': self.facebook,
'github': self.github,
'linkedin': self.linkedin,
'organisation': self.organisation,
'position': self.position,
'country': self.country,
'city': self.city,
'gender': self.gender,
'heard_from': self.heard_from,
'sponsorship_required': self.sponsorship_required,
'sessions': session_data
}
def ensure_social_links(self):
"""convert usernames in social network fields to full links"""
self.twitter = ensure_social_link('https://twitter.com', self.twitter)
self.facebook = ensure_social_link('https://www.facebook.com', self.facebook)
self.github = ensure_social_link('https://github.com', self.github)
self.linkedin = ensure_social_link('https://www.linkedin.com/in', self.linkedin)
| gpl-3.0 | -6,421,023,573,535,674,000 | 35.040268 | 132 | 0.571508 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2018_04_01/models/key_vault_and_key_reference_py3.py | 1 | 1500 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class KeyVaultAndKeyReference(Model):
    """Key Vault Key Url and vault id of KeK; KeK is optional and, when
    provided, is used to unwrap the encryptionKey.
All required parameters must be populated in order to send to Azure.
:param source_vault: Required. Resource id of the KeyVault containing the
key or secret
:type source_vault: ~azure.mgmt.compute.v2018_04_01.models.SourceVault
:param key_url: Required. Url pointing to a key or secret in KeyVault
:type key_url: str
"""
_validation = {
'source_vault': {'required': True},
'key_url': {'required': True},
}
_attribute_map = {
'source_vault': {'key': 'sourceVault', 'type': 'SourceVault'},
'key_url': {'key': 'keyUrl', 'type': 'str'},
}
def __init__(self, *, source_vault, key_url: str, **kwargs) -> None:
super(KeyVaultAndKeyReference, self).__init__(**kwargs)
self.source_vault = source_vault
self.key_url = key_url
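# --- Usage sketch (illustrative; not part of the generated SDK code) ---
# Both fields are required: ``source_vault`` carries the Key Vault resource id
# and ``key_url`` points at the key-encryption key. The lazy import, resource
# id and URL below are placeholders/assumptions made for this sketch.
def _example_key_vault_and_key_reference():
    from azure.mgmt.compute.v2018_04_01.models import SourceVault
    return KeyVaultAndKeyReference(
        source_vault=SourceVault(id=(
            "/subscriptions/<sub>/resourceGroups/<rg>"
            "/providers/Microsoft.KeyVault/vaults/<vault>")),
        key_url="https://<vault>.vault.azure.net/keys/<key-name>/<key-version>",
    )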
| mit | -3,852,129,576,539,465,000 | 35.585366 | 82 | 0.600667 | false |
yordan-desta/QgisIns | python/plugins/processing/core/AlgorithmProvider.py | 1 | 4825 | # -*- coding: utf-8 -*-
"""
***************************************************************************
AlgorithmProvider.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4 import QtGui, QtCore
from qgis.core import *
from processing.core.ProcessingConfig import Setting, ProcessingConfig
class AlgorithmProvider:
"""This is the base class for algorithms providers.
An algorithm provider is a set of related algorithms, typically
from the same external application or related to a common area
of analysis.
"""
def __init__(self):
# Indicates if the provider should be active by default.
        # For providers relying on external software, this should be
        # False, so the user has to activate them manually and install
        # the required software in advance.
self.activate = True
self.actions = []
self.contextMenuActions = []
def loadAlgorithms(self):
self.algs = []
name = 'ACTIVATE_' + self.getName().upper().replace(' ', '_')
if not ProcessingConfig.getSetting(name):
return
else:
self._loadAlgorithms()
for alg in self.algs:
alg.provider = self
# Methods to be overridden.
def _loadAlgorithms(self):
"""Algorithm loading should take place here, filling self.algs,
which is a list of elements of class GeoAlgorithm. Use that
class to create your own algorithms.
"""
pass
def initializeSettings(self):
"""This is the place where you should add config parameters
using the ProcessingConfig class.
This method is called when a provider is added to the
Processing framework. By default it just adds a setting to
activate or deactivate algorithms from the provider.
"""
ProcessingConfig.settingIcons[self.getDescription()] = self.getIcon()
name = 'ACTIVATE_' + self.getName().upper().replace(' ', '_')
ProcessingConfig.addSetting(Setting(self.getDescription(), name,
self.tr('Activate'), self.activate))
def unload(self):
"""Do here anything that you want to be done when the provider
is removed from the list of available ones.
This method is called when you remove the provider from
Processing. Removal of config setting should be done here.
"""
name = 'ACTIVATE_' + self.getName().upper().replace(' ', '_')
ProcessingConfig.removeSetting(name)
def getName(self):
"""Returns the name to use to create the command-line name.
Should be a short descriptive name of the provider.
"""
return 'processing'
def getDescription(self):
"""Returns the full name of the provider.
"""
return self.tr('Generic algorithm provider')
def getIcon(self):
return QtGui.QIcon(os.path.dirname(__file__) + '/../images/alg.png')
def getSupportedOutputRasterLayerExtensions(self):
return ['tif']
def getSupportedOutputVectorLayerExtensions(self):
formats = QgsVectorFileWriter.supportedFiltersAndFormats()
extensions = ['shp'] # shp is the default, should be the first
for extension in formats.keys():
extension = unicode(extension)
extension = extension[extension.find('*.') + 2:]
extension = extension[:extension.find(' ')]
if extension.lower() != 'shp':
extensions.append(extension)
return extensions
def getSupportedOutputTableExtensions(self):
return ['csv']
def supportsNonFileBasedOutput(self):
return False
def tr(self, string, context=''):
if context == '':
context = 'AlgorithmProvider'
return QtCore.QCoreApplication.translate(context, string)
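# --- Illustrative sketch (not part of the Processing sources) ---
# A concrete provider typically overrides getName()/getDescription() and fills
# self.algs inside _loadAlgorithms(), as the docstrings above describe. The
# class below is a hypothetical, minimal example of that pattern.
class ExampleProvider(AlgorithmProvider):

    def getName(self):
        return 'example'

    def getDescription(self):
        return self.tr('Example algorithm provider')

    def _loadAlgorithms(self):
        # Append GeoAlgorithm instances here; left empty in this sketch.
        self.algs = []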
| gpl-2.0 | 7,054,145,103,071,834,000 | 36.403101 | 77 | 0.573264 | false |
mozillazg/python-pinyin | pypinyin/style/finals.py | 1 | 2173 | # -*- coding: utf-8 -*-
"""韵母相关拼音风格:
Style.FINALS
Style.FINALS_TONE
Style.FINALS_TONE2
Style.FINALS_TONE3
"""
from __future__ import unicode_literals
from pypinyin.constants import Style
from pypinyin.style import register
from pypinyin.style._constants import RE_TONE3
from pypinyin.standard import convert_finals
from pypinyin.style._utils import (
get_finals, has_finals,
replace_symbol_to_number, replace_symbol_to_no_symbol
)
class FinalsConverter(object):
def to_finals(self, pinyin, **kwargs):
if kwargs.get('strict'):
pinyin = convert_finals(pinyin)
has_fi = has_finals(pinyin)
        # Replace tone-marked characters with their toneless equivalents
pinyin = replace_symbol_to_no_symbol(pinyin)
if not has_fi:
return pinyin
        # Extract the finals part
return get_finals(pinyin, strict=False)
def to_finals_tone(self, pinyin, **kwargs):
if not has_finals(pinyin):
return pinyin
        # Extract the finals part
return get_finals(pinyin, strict=kwargs.get('strict'))
def to_finals_tone2(self, pinyin, **kwargs):
if kwargs.get('strict'):
pinyin = convert_finals(pinyin)
has_fi = has_finals(pinyin)
        # Represent the tone with a digit
pinyin = replace_symbol_to_number(pinyin)
if not has_fi:
return pinyin
        # Extract the finals part
return get_finals(pinyin, strict=False)
def to_finals_tone3(self, pinyin, **kwargs):
if kwargs.get('strict'):
pinyin = convert_finals(pinyin)
has_fi = has_finals(pinyin)
        # Represent the tone with a digit
pinyin = replace_symbol_to_number(pinyin)
        # Move the tone digit to the end of the syllable
pinyin = RE_TONE3.sub(r'\1\3\2', pinyin)
if not has_fi:
return pinyin
        # Extract the finals part
return get_finals(pinyin, strict=False)
converter = FinalsConverter()
register(Style.FINALS, func=converter.to_finals)
register(Style.FINALS_TONE, func=converter.to_finals_tone)
register(Style.FINALS_TONE2, func=converter.to_finals_tone2)
register(Style.FINALS_TONE3, func=converter.to_finals_tone3)
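# --- Usage sketch (illustrative; not part of the library source) ---
# After the register() calls above, the four finals styles are available
# through the normal public API. The import is done lazily inside the function
# to avoid a circular import while this module is being loaded, and it assumes
# the top-level ``pypinyin`` package is importable.
def _finals_style_demo(hans='中国'):
    """Hypothetical demo: the finals of ``hans`` under each registered style."""
    from pypinyin import pinyin, Style
    return {
        'FINALS': pinyin(hans, style=Style.FINALS),
        'FINALS_TONE': pinyin(hans, style=Style.FINALS_TONE),
        'FINALS_TONE2': pinyin(hans, style=Style.FINALS_TONE2),
        'FINALS_TONE3': pinyin(hans, style=Style.FINALS_TONE3),
    }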
| mit | -2,118,293,914,438,749,700 | 26.90411 | 62 | 0.64163 | false |
tulsawebdevs/django-multi-gtfs | multigtfs/tests/test_service.py | 2 | 3051 | #
# Copyright 2012-2014 John Whitlock
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
from datetime import date
from django.test import TestCase
from django.utils.six import StringIO
from multigtfs.models import Feed, Service
class ServiceTest(TestCase):
def setUp(self):
self.feed = Feed.objects.create()
def test_string(self):
service = Service.objects.create(
feed=self.feed, service_id='S1', start_date=date(2011, 4, 14),
end_date=date(2011, 12, 31))
self.assertEqual(str(service), '%d-S1' % self.feed.id)
def test_import_calendar_txt(self):
calendar_txt = StringIO("""\
service_id,monday,tuesday,wednesday,thursday,friday,saturday,sunday,\
start_date,end_date
W,1,0,1,0,1,0,1,20120414,20121231
""")
Service.import_txt(calendar_txt, self.feed)
service = Service.objects.get()
self.assertEqual(service.feed, self.feed)
self.assertEqual(service.service_id, 'W')
self.assertTrue(service.monday)
self.assertFalse(service.tuesday)
self.assertTrue(service.wednesday)
self.assertFalse(service.thursday)
self.assertTrue(service.friday)
self.assertFalse(service.saturday)
self.assertTrue(service.sunday)
self.assertEqual(service.start_date, date(2012, 4, 14))
self.assertEqual(service.end_date, date(2012, 12, 31))
def test_import_calendar_duplicate(self):
calendar_txt = StringIO("""\
service_id,monday,tuesday,wednesday,thursday,friday,saturday,sunday,\
start_date,end_date
W,1,0,1,0,1,0,1,20120414,20121231
W,0,1,0,1,0,1,0,20120414,20121231
""")
Service.import_txt(calendar_txt, self.feed)
service = Service.objects.get() # Only one
self.assertEqual(service.feed, self.feed)
self.assertEqual(service.service_id, 'W')
def test_export_calendar_txt_none(self):
calendar_txt = Service.export_txt(self.feed)
self.assertFalse(calendar_txt)
def test_export_calendar_txt(self):
Service.objects.create(
feed=self.feed, service_id='W', monday=True, tuesday=False,
wednesday=True, thursday=False, friday=True, saturday=False,
sunday=True, start_date=date(2012, 7, 17),
end_date=date(2013, 7, 17))
calendar_txt = Service.export_txt(self.feed)
self.assertEqual(calendar_txt, """\
service_id,monday,tuesday,wednesday,thursday,friday,saturday,sunday,\
start_date,end_date
W,1,0,1,0,1,0,1,20120717,20130717
""")
| apache-2.0 | 2,180,882,623,893,636,900 | 36.207317 | 74 | 0.689282 | false |
boundlessgeo/QGIS | tests/src/python/test_qgsvalidityresultswidget.py | 6 | 3113 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for validity results widget
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '03/12/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
from qgis.core import QgsValidityCheckResult
from qgis.gui import QgsValidityCheckResultsModel
from qgis.PyQt.QtCore import QModelIndex, Qt
from qgis.testing import start_app, unittest
app = start_app()
class TestQgsValidityResultsWidget(unittest.TestCase):
def testModel(self):
res1 = QgsValidityCheckResult()
res1.type = QgsValidityCheckResult.Warning
res1.title = 'test'
res1.detailedDescription = 'blah blah'
res2 = QgsValidityCheckResult()
res2.type = QgsValidityCheckResult.Critical
res2.title = 'test2'
res2.detailedDescription = 'blah blah2'
res3 = QgsValidityCheckResult()
res3.type = QgsValidityCheckResult.Warning
res3.title = 'test3'
res3.detailedDescription = 'blah blah3'
res4 = QgsValidityCheckResult()
res4.type = QgsValidityCheckResult.Warning
res4.title = 'test4'
res4.detailedDescription = 'blah blah4'
model = QgsValidityCheckResultsModel([])
self.assertEqual(model.rowCount(), 0)
self.assertFalse(model.data(model.index(0, 0, QModelIndex()), Qt.DisplayRole))
self.assertFalse(model.data(model.index(-1, 0, QModelIndex()), Qt.DisplayRole))
self.assertFalse(model.data(model.index(1, 0, QModelIndex()), Qt.DisplayRole))
model = QgsValidityCheckResultsModel([res1, res2, res3, res4])
self.assertEqual(model.rowCount(), 4)
self.assertFalse(model.data(model.index(-1, 0, QModelIndex()), Qt.DisplayRole))
self.assertEqual(model.data(model.index(0, 0, QModelIndex()), Qt.DisplayRole), 'test')
self.assertEqual(model.data(model.index(1, 0, QModelIndex()), Qt.DisplayRole), 'test2')
self.assertEqual(model.data(model.index(2, 0, QModelIndex()), Qt.DisplayRole), 'test3')
self.assertEqual(model.data(model.index(3, 0, QModelIndex()), Qt.DisplayRole), 'test4')
self.assertFalse(model.data(model.index(4, 0, QModelIndex()), Qt.DisplayRole))
self.assertEqual(model.data(model.index(0, 0, QModelIndex()), QgsValidityCheckResultsModel.DescriptionRole), 'blah blah')
self.assertEqual(model.data(model.index(1, 0, QModelIndex()), QgsValidityCheckResultsModel.DescriptionRole), 'blah blah2')
self.assertEqual(model.data(model.index(2, 0, QModelIndex()), QgsValidityCheckResultsModel.DescriptionRole), 'blah blah3')
self.assertEqual(model.data(model.index(3, 0, QModelIndex()), QgsValidityCheckResultsModel.DescriptionRole), 'blah blah4')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | 6,473,184,253,258,920,000 | 44.779412 | 130 | 0.698362 | false |
dmlc/tvm | python/tvm/relay/data_dep_optimization/simplify_fc_transpose.py | 4 | 1982 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, not-context-manager
"""Automatically optimize fc transpose"""
import numpy as np
import tvm
from tvm import relay
from tvm.relay.analysis import search_fc_transpose
from .utils import _run_opt_pass
def convert(func, params):
"""convert all ```y = nn.dense(x, transpose(w, [1, 0]))``` to
```y = nn.dense(x, wt)```
Parameters
----------
func : relay.Expr
Expr will be optimized
params : Dict[String, tvm.nd.array]
Parameters of Expr
Returns
-------
new_func : relay.Expr
Mutated Expr from ```y = nn.dense(x, transpose(w, [1, 0]))``` to
```y = nn.dense(x, wt)```
params: Dict[String, tvm.nd.array]
Parameters of mutated Expr, with weights pre-transposed
"""
weight_info = search_fc_transpose(func)
for item in weight_info:
name = str(item)
w_np = params[name].asnumpy()
new_w = np.transpose(w_np, axes=[1, 0])
params[name + ".T"] = tvm.nd.array(new_w)
del params[name]
new_func = _run_opt_pass(
func,
relay.transform.SimplifyFCTranspose(
weight_info,
),
)
return new_func, params
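# --- Usage sketch (illustrative; not part of TVM) ---
# Builds a tiny Relay function containing the y = nn.dense(x, transpose(w, [1, 0]))
# pattern described in the docstring above and runs ``convert`` on it. The
# shapes, names and random weight are arbitrary placeholders.
def _convert_usage_sketch():
    x = relay.var("x", shape=(1, 8), dtype="float32")
    w = relay.var("w", shape=(8, 16), dtype="float32")
    y = relay.nn.dense(x, relay.transpose(w, axes=[1, 0]))
    func = relay.Function([x, w], y)
    params = {"w": tvm.nd.array(np.random.rand(8, 16).astype("float32"))}
    # After conversion the transpose is folded away and the weight is stored
    # pre-transposed under the key "w.T".
    new_func, new_params = convert(func, params)
    return new_func, new_params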
| apache-2.0 | -7,829,170,379,208,612,000 | 32.033333 | 72 | 0.659435 | false |
lepture/oauthlib | tests/oauth1/rfc5849/test_signatures.py | 2 | 13424 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
from oauthlib.oauth1.rfc5849.signature import collect_parameters
from oauthlib.oauth1.rfc5849.signature import construct_base_string
from oauthlib.oauth1.rfc5849.signature import normalize_base_string_uri
from oauthlib.oauth1.rfc5849.signature import normalize_parameters
from oauthlib.oauth1.rfc5849.signature import sign_hmac_sha1
from oauthlib.oauth1.rfc5849.signature import sign_rsa_sha1
from oauthlib.oauth1.rfc5849.signature import sign_plaintext
from oauthlib.common import unicode_type
from ...unittest import TestCase
class SignatureTests(TestCase):
uri_query = "b5=%3D%253D&a3=a&c%40=&a2=r%20b&c2=&a3=2+q"
authorization_header = """OAuth realm="Example",
oauth_consumer_key="9djdj82h48djs9d2",
oauth_token="kkk9d7dh3k39sjv7",
oauth_signature_method="HMAC-SHA1",
oauth_timestamp="137131201",
oauth_nonce="7d8f3e4a",
oauth_signature="djosJKDKJSD8743243%2Fjdk33klY%3D" """.strip()
body = "content=This+is+being+the+body+of+things"
http_method = b"post"
base_string_url = quote("http://example.com/request?b5=%3D%253D"
"&a3=a&c%40=&a2=r%20b").encode('utf-8')
normalized_encoded_request_parameters = quote(
'OAuth realm="Example",'
'oauth_consumer_key="9djdj82h48djs9d2",'
'oauth_token="kkk9d7dh3k39sjv7",'
'oauth_signature_method="HMAC-SHA1",'
'oauth_timestamp="137131201",'
'oauth_nonce="7d8f3e4a",'
'oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"'
).encode('utf-8')
client_secret = b"ECrDNoq1VYzzzzzzzzzyAK7TwZNtPnkqatqZZZZ"
resource_owner_secret = b"just-a-string asdasd"
control_base_string = (
"POST&http%253A%2F%2Fexample.com%2Frequest%253F"
"b5%253D%25253D%2525253D%2526"
"a3%253D"
"a%2526"
"c%252540%253D%2526"
"a2%253D"
"r%252520b&"
"OAuth%2520realm%253D%2522Example%2522%252C"
"oauth_consumer_key%253D%25229djdj82h48djs9d2%2522%252C"
"oauth_token%253D%2522kkk9d7dh3k39sjv7%2522%252C"
"oauth_signature_method%253D%2522HMAC-SHA1%2522%252C"
"oauth_timestamp%253D%2522137131201%2522%252C"
"oauth_nonce%253D%25227d8f3e4a%2522%252C"
"oauth_signature%253D%2522bYT5CMsGcbgUdFHObYMEfcx6bsw%25253D%2522")
def test_construct_base_string(self):
"""
Example text to be turned into a base string::
POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
Host: example.com
Content-Type: application/x-www-form-urlencoded
Authorization: OAuth realm="Example",
oauth_consumer_key="9djdj82h48djs9d2",
oauth_token="kkk9d7dh3k39sjv7",
oauth_signature_method="HMAC-SHA1",
oauth_timestamp="137131201",
oauth_nonce="7d8f3e4a",
oauth_signature="bYT5CMsGcbgUdFHObYMEfcx6bsw%3D"
Sample Base string generated and tested against::
POST&http%253A%2F%2Fexample.com%2Frequest%253Fb5%253D%25253D%252525
3D%2526a3%253Da%2526c%252540%253D%2526a2%253Dr%252520b&OAuth%2520re
alm%253D%2522Example%2522%252Coauth_consumer_key%253D%25229djdj82h4
8djs9d2%2522%252Coauth_token%253D%2522kkk9d7dh3k39sjv7%2522%252Coau
th_signature_method%253D%2522HMAC-SHA1%2522%252Coauth_timestamp%253
D%2522137131201%2522%252Coauth_nonce%253D%25227d8f3e4a%2522%252Coau
th_signature%253D%2522bYT5CMsGcbgUdFHObYMEfcx6bsw%25253D%2522
"""
self.assertRaises(ValueError, construct_base_string,
self.http_method,
self.base_string_url,
self.normalized_encoded_request_parameters)
self.assertRaises(ValueError, construct_base_string,
self.http_method.decode('utf-8'),
self.base_string_url,
self.normalized_encoded_request_parameters)
self.assertRaises(ValueError, construct_base_string,
self.http_method.decode('utf-8'),
self.base_string_url.decode('utf-8'),
self.normalized_encoded_request_parameters)
base_string = construct_base_string(
self.http_method.decode('utf-8'),
self.base_string_url.decode('utf-8'),
self.normalized_encoded_request_parameters.decode('utf-8')
)
self.assertEqual(self.control_base_string, base_string)
def test_normalize_base_string_uri(self):
"""
Example text to be turned into a normalized base string uri::
GET /?q=1 HTTP/1.1
Host: www.example.net:8080
Sample string generated::
https://www.example.net:8080/
"""
# test for unicode failure
uri = b"www.example.com:8080"
self.assertRaises(ValueError, normalize_base_string_uri, uri)
# test for missing scheme
uri = "www.example.com:8080"
self.assertRaises(ValueError, normalize_base_string_uri, uri)
# test a URI with the default port
uri = "http://www.example.com:80/"
self.assertEquals(normalize_base_string_uri(uri),
"http://www.example.com/")
# test a URI missing a path
uri = "http://www.example.com"
self.assertEquals(normalize_base_string_uri(uri),
"http://www.example.com/")
# test a relative URI
uri = "/a-host-relative-uri"
host = "www.example.com"
        self.assertRaises(ValueError, normalize_base_string_uri, uri, host)
# test overriding the URI's netloc with a host argument
uri = "http://www.example.com/a-path"
host = "alternatehost.example.com"
self.assertEquals(normalize_base_string_uri(uri, host),
"http://alternatehost.example.com/a-path")
def test_collect_parameters(self):
"""We check against parameters multiple times in case things change
after more parameters are added.
"""
self.assertEquals(collect_parameters(), [])
# Check against uri_query
parameters = collect_parameters(uri_query=self.uri_query)
correct_parameters = [('b5', '=%3D'),
('a3', 'a'),
('c@', ''),
('a2', 'r b'),
('c2', ''),
('a3', '2 q')]
self.assertEqual(sorted(parameters), sorted(correct_parameters))
headers = {'Authorization': self.authorization_header}
# check against authorization header as well
parameters = collect_parameters(
uri_query=self.uri_query, headers=headers)
parameters_with_realm = collect_parameters(
uri_query=self.uri_query, headers=headers, with_realm=True)
# Redo the checks against all the parameters. Duplicated code but
# better safety
correct_parameters += [
('oauth_nonce', '7d8f3e4a'),
('oauth_timestamp', '137131201'),
('oauth_consumer_key', '9djdj82h48djs9d2'),
('oauth_signature_method', 'HMAC-SHA1'),
('oauth_token', 'kkk9d7dh3k39sjv7')]
correct_parameters_with_realm = (
correct_parameters + [('realm', 'Example')])
self.assertEqual(sorted(parameters), sorted(correct_parameters))
self.assertEqual(sorted(parameters_with_realm),
sorted(correct_parameters_with_realm))
# Add in the body.
# TODO: Add more content for the body. Daniel Greenfeld 2012/03/12
# Redo again the checks against all the parameters. Duplicated code
# but better safety
parameters = collect_parameters(
uri_query=self.uri_query, body=self.body, headers=headers)
correct_parameters += [
('content', 'This is being the body of things')]
self.assertEqual(sorted(parameters), sorted(correct_parameters))
def test_normalize_parameters(self):
""" We copy some of the variables from the test method above."""
headers = {'Authorization': self.authorization_header}
parameters = collect_parameters(
uri_query=self.uri_query, body=self.body, headers=headers)
normalized = normalize_parameters(parameters)
# Unicode everywhere and always
self.assertIsInstance(normalized, unicode_type)
# Lets see if things are in order
# check to see that querystring keys come in alphanumeric order:
querystring_keys = ['a2', 'a3', 'b5', 'content', 'oauth_consumer_key',
'oauth_nonce', 'oauth_signature_method',
'oauth_timestamp', 'oauth_token']
index = -1 # start at -1 because the 'a2' key starts at index 0
for key in querystring_keys:
self.assertGreater(normalized.index(key), index)
index = normalized.index(key)
def test_sign_hmac_sha1(self):
"""Verifying HMAC-SHA1 signature against one created by OpenSSL."""
# Control signature created using openssl:
# echo -n $(cat <message>) | openssl dgst -binary -hmac <key> | base64
control_signature = "Uau4O9Kpd2k6rvh7UZN/RN+RG7Y="
self.assertRaises(ValueError, sign_hmac_sha1, self.control_base_string,
self.client_secret, self.resource_owner_secret)
sign = sign_hmac_sha1(self.control_base_string,
self.client_secret.decode('utf-8'),
self.resource_owner_secret.decode('utf-8'))
self.assertEquals(len(sign), 28)
self.assertEquals(sign, control_signature)
def test_sign_rsa_sha1(self):
"""Verify RSA-SHA1 signature against one created by OpenSSL."""
base_string = (b"POST&http%253A%2F%2Fexample.com%2Frequest%253Fb5%253D"
b"%25253D%2525253D%2526a3%253Da%2526c%252540%253D%2526"
b"a2%253Dr%252520b&OAuth%2520realm%253D%2522Example%25"
b"22%252Coauth_consumer_key%253D%25229djdj82h48djs9d2"
b"%2522%252Coauth_token%253D%2522kkk9d7dh3k39sjv7%2522"
b"%252Coauth_signature_method%253D%2522HMAC-SHA1%2522"
b"%252Coauth_timestamp%253D%2522137131201%2522%252Coau"
b"th_nonce%253D%25227d8f3e4a%2522%252Coauth_signature"
b"%253D%2522bYT5CMsGcbgUdFHObYMEfcx6bsw%25253D%2522")
# Generated using: $ openssl genrsa -out <key>.pem 1024
# PyCrypto / python-rsa requires the key to be concatenated with
# linebreaks.
private_key = (
b"-----BEGIN RSA PRIVATE KEY-----\nMIICXgIBAAKBgQDk1/bxy"
b"S8Q8jiheHeYYp/4rEKJopeQRRKKpZI4s5i+UPwVpupG\nAlwXWfzXw"
b"SMaKPAoKJNdu7tqKRniqst5uoHXw98gj0x7zamu0Ck1LtQ4c7pFMVa"
b"h\n5IYGhBi2E9ycNS329W27nJPWNCbESTu7snVlG8V8mfvGGg3xNjT"
b"MO7IdrwIDAQAB\nAoGBAOQ2KuH8S5+OrsL4K+wfjoCi6MfxCUyqVU9"
b"GxocdM1m30WyWRFMEz2nKJ8fR\np3vTD4w8yplTOhcoXdQZl0kRoaD"
b"zrcYkm2VvJtQRrX7dKFT8dR8D/Tr7dNQLOXfC\nDY6xveQczE7qt7V"
b"k7lp4FqmxBsaaEuokt78pOOjywZoInjZhAkEA9wz3zoZNT0/i\nrf6"
b"qv2qTIeieUB035N3dyw6f1BGSWYaXSuerDCD/J1qZbAPKKhyHZbVaw"
b"Ft3UMhe\n542UftBaxQJBAO0iJy1I8GQjGnS7B3yvyH3CcLYGy296+"
b"XO/2xKp/d/ty1OIeovx\nC60pLNwuFNF3z9d2GVQAdoQ89hUkOtjZL"
b"eMCQQD0JO6oPHUeUjYT+T7ImAv7UKVT\nSuy30sKjLzqoGw1kR+wv7"
b"C5PeDRvscs4wa4CW9s6mjSrMDkDrmCLuJDtmf55AkEA\nkmaMg2PNr"
b"jUR51F0zOEFycaaqXbGcFwe1/xx9zLmHzMDXd4bsnwt9kk+fe0hQzV"
b"S\nJzatanQit3+feev1PN3QewJAWv4RZeavEUhKv+kLe95Yd0su7lT"
b"LVduVgh4v5yLT\nGa6FHdjGPcfajt+nrpB1n8UQBEH9ZxniokR/IPv"
b"dMlxqXA==\n-----END RSA PRIVATE KEY-----"
)
# Base string saved in "<message>". Signature obtained using:
# $ echo -n $(cat <message>) | openssl dgst -sign <key>.pem | base64
# where echo -n suppresses the last linebreak.
control_signature = (
"zV5g8ArdMuJuOXlH8XOqfLHS11XdthfIn4HReDm7jz8JmgLabHGmVBqCkCfZoFJPH"
"dka7tLvCplK/jsV4FUOnftrJOQhbXguuBdi87/hmxOFKLmQYqqlEW7BdXmwKLZcki"
"qq3qE5XziBgKSAFRkxJ4gmJAymvJBtrJYN9728rK8="
)
sign = sign_rsa_sha1(base_string, private_key)
self.assertEquals(sign, control_signature)
sign = sign_rsa_sha1(base_string.decode('utf-8'), private_key)
self.assertEquals(sign, control_signature)
def test_sign_plaintext(self):
""" """
self.assertRaises(ValueError, sign_plaintext, self.client_secret,
self.resource_owner_secret)
sign = sign_plaintext(self.client_secret.decode('utf-8'),
self.resource_owner_secret.decode('utf-8'))
correct = ("ECrDNoq1VYzzzzzzzzzyAK7TwZNtPnkqatqZZZZ&"
"just-a-string%20%20%20%20asdasd")
self.assertEquals(sign, correct)
| bsd-3-clause | 5,968,855,021,790,440,000 | 45.449827 | 79 | 0.623585 | false |
Southpaw-TACTIC/Team | src/python/Lib/site-packages/win32com/client/build.py | 1 | 23871 | """Contains knowledge to build a COM object definition.
This module is used by both the @dynamic@ and @makepy@ modules to build
all knowledge of a COM object.
This module contains classes which contain the actual knowledge of the object.
This include parameter and return type information, the COM dispid and CLSID, etc.
Other modules may use this information to generate .py files, use the information
dynamically, or possibly even generate .html documentation for objects.
"""
#
# NOTES: DispatchItem and MapEntry used by dynamic.py.
# the rest is used by makepy.py
#
# OleItem, DispatchItem, MapEntry, BuildCallList() is used by makepy
import sys
import string
import types
from keyword import iskeyword
from win32com.client import NeedUnicodeConversions
import pythoncom
from pywintypes import UnicodeType, TimeType
import winerror
# A string ending with a quote can not be safely triple-quoted.
def _safeQuotedString(s):
if s[-1]=='"': s = s[:-1]+'\\"'
return '"""%s"""' % s
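# Illustrative check (not in the original module): the escaping above prevents
# a trailing double quote from closing the generated triple-quoted literal
# prematurely. The helper name is arbitrary.
def _SafeQuotedStringDemo():
    # The trailing quote is escaped before the triple quotes are added.
    return _safeQuotedString('ends with a quote "')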
error = "PythonCOM.Client.Build error"
class NotSupportedException(Exception): pass # Raised when we can't support a param type.
DropIndirection="DropIndirection"
NoTranslateTypes = [
pythoncom.VT_BOOL, pythoncom.VT_CLSID, pythoncom.VT_CY,
pythoncom.VT_DATE, pythoncom.VT_DECIMAL, pythoncom.VT_EMPTY,
pythoncom.VT_ERROR, pythoncom.VT_FILETIME, pythoncom.VT_HRESULT,
pythoncom.VT_I1, pythoncom.VT_I2, pythoncom.VT_I4,
pythoncom.VT_I8, pythoncom.VT_INT, pythoncom.VT_NULL,
pythoncom.VT_R4, pythoncom.VT_R8, pythoncom.VT_NULL,
pythoncom.VT_STREAM,
pythoncom.VT_UI1, pythoncom.VT_UI2, pythoncom.VT_UI4,
pythoncom.VT_UI8, pythoncom.VT_UINT, pythoncom.VT_VOID,
]
NoTranslateMap = {}
for v in NoTranslateTypes:
NoTranslateMap[v] = None
class MapEntry:
	"Simple holder for named attributes - items in a map."
def __init__(self, desc_or_id, names=None, doc=None, resultCLSID=pythoncom.IID_NULL, resultDoc = None, hidden=0):
if type(desc_or_id)==type(0):
self.dispid = desc_or_id
self.desc = None
else:
self.dispid = desc_or_id[0]
self.desc = desc_or_id
self.names = names
self.doc = doc
self.resultCLSID = resultCLSID
self.resultDocumentation = resultDoc
self.wasProperty = 0 # Have I been transformed into a function so I can pass args?
self.hidden = hidden
def GetResultCLSID(self):
rc = self.resultCLSID
if rc == pythoncom.IID_NULL: return None
return rc
# Return a string, suitable for output - either "'{...}'" or "None"
def GetResultCLSIDStr(self):
rc = self.GetResultCLSID()
if rc is None: return "None"
return repr(str(rc)) # Convert the IID object to a string, then to a string in a string.
def GetResultName(self):
if self.resultDocumentation is None:
return None
return self.resultDocumentation[0]
class OleItem:
typename = "OleItem"
def __init__(self, doc=None):
self.doc = doc
if self.doc:
self.python_name = MakePublicAttributeName(self.doc[0])
else:
self.python_name = None
self.bWritten = 0
self.bIsDispatch = 0
self.bIsSink = 0
self.clsid = None
self.co_class = None
class DispatchItem(OleItem):
typename = "DispatchItem"
def __init__(self, typeinfo=None, attr=None, doc=None, bForUser=1):
OleItem.__init__(self,doc)
self.propMap = {}
self.propMapGet = {}
self.propMapPut = {}
self.mapFuncs = {}
self.defaultDispatchName = None
self.hidden = 0
if typeinfo:
self.Build(typeinfo, attr, bForUser)
def _propMapPutCheck_(self,key,item):
ins, outs, opts = self.CountInOutOptArgs(item.desc[2])
if ins>1: # if a Put property takes more than 1 arg:
if opts+1==ins or ins==item.desc[6]+1:
newKey = "Set" + key
deleteExisting = 0 # This one is still OK
else:
deleteExisting = 1 # No good to us
if self.mapFuncs.has_key(key) or self.propMapGet.has_key(key):
newKey = "Set" + key
else:
newKey = key
item.wasProperty = 1
self.mapFuncs[newKey] = item
if deleteExisting:
del self.propMapPut[key]
def _propMapGetCheck_(self,key,item):
ins, outs, opts = self.CountInOutOptArgs(item.desc[2])
if ins > 0: # if a Get property takes _any_ in args:
if item.desc[6]==ins or ins==opts:
newKey = "Get" + key
deleteExisting = 0 # This one is still OK
else:
deleteExisting = 1 # No good to us
if self.mapFuncs.has_key(key):
newKey = "Get" + key
else:
newKey = key
item.wasProperty = 1
self.mapFuncs[newKey] = item
if deleteExisting:
del self.propMapGet[key]
def _AddFunc_(self,typeinfo,fdesc,bForUser):
id = fdesc.memid
funcflags = fdesc.wFuncFlags
try:
names = typeinfo.GetNames(id)
name=names[0]
except pythoncom.ole_error:
name = ""
names = None
doc = None
try:
if bForUser:
doc = typeinfo.GetDocumentation(id)
except pythoncom.ole_error:
pass
if id==0 and name:
self.defaultDispatchName = name
invkind = fdesc.invkind
# We need to translate any Alias', Enums, structs etc in result and args
typerepr, flag, defval = fdesc.rettype
# sys.stderr.write("%s result - %s -> " % (name, typerepr))
typerepr, resultCLSID, resultDoc = _ResolveType(typerepr, typeinfo)
# sys.stderr.write("%s\n" % (typerepr,))
fdesc.rettype = typerepr, flag, defval, resultCLSID
# Translate any Alias or Enums in argument list.
argList = []
for argDesc in fdesc.args:
typerepr, flag, defval = argDesc
# sys.stderr.write("%s arg - %s -> " % (name, typerepr))
arg_type, arg_clsid, arg_doc = _ResolveType(typerepr, typeinfo)
argDesc = arg_type, flag, defval, arg_clsid
# sys.stderr.write("%s\n" % (argDesc[0],))
argList.append(argDesc)
fdesc.args = tuple(argList)
hidden = (funcflags & pythoncom.FUNCFLAG_FHIDDEN) != 0
if invkind == pythoncom.INVOKE_PROPERTYGET:
map = self.propMapGet
			# This is not the best solution, but I don't think there is
# one without specific "set" syntax.
# If there is a single PUT or PUTREF, it will function as a property.
# If there are both, then the PUT remains a property, and the PUTREF
# gets transformed into a function.
# (in vb, PUT=="obj=other_obj", PUTREF="set obj=other_obj
elif invkind in (pythoncom.INVOKE_PROPERTYPUT, pythoncom.INVOKE_PROPERTYPUTREF):
# Special case
existing = self.propMapPut.get(name, None)
if existing is not None:
if existing.desc[4]==pythoncom.INVOKE_PROPERTYPUT: # Keep this one
map = self.mapFuncs
name = "Set"+name
else: # Existing becomes a func.
existing.wasProperty = 1
self.mapFuncs["Set"+name]=existing
map = self.propMapPut # existing gets overwritten below.
else:
map = self.propMapPut # first time weve seen it.
elif invkind == pythoncom.INVOKE_FUNC:
map = self.mapFuncs
else:
map = None
if not map is None:
# if map.has_key(name):
# sys.stderr.write("Warning - overwriting existing method/attribute %s\n" % name)
map[name] = MapEntry(tuple(fdesc), names, doc, resultCLSID, resultDoc, hidden)
# any methods that can't be reached via DISPATCH we return None
			# for, so dynamic dispatch doesn't see it.
if fdesc.funckind != pythoncom.FUNC_DISPATCH:
return None
return (name,map)
return None
def _AddVar_(self,typeinfo,fdesc,bForUser):
### need pythoncom.VARFLAG_FRESTRICTED ...
### then check it
if fdesc.varkind == pythoncom.VAR_DISPATCH:
id = fdesc.memid
names = typeinfo.GetNames(id)
# Translate any Alias or Enums in result.
typerepr, flags, defval = fdesc.elemdescVar
typerepr, resultCLSID, resultDoc = _ResolveType(typerepr, typeinfo)
fdesc.elemdescVar = typerepr, flags, defval
doc = None
try:
if bForUser: doc = typeinfo.GetDocumentation(id)
except pythoncom.ole_error:
pass
# handle the enumerator specially
map = self.propMap
# Check if the element is hidden.
hidden = 0
if hasattr(fdesc,"wVarFlags"):
hidden = (fdesc.wVarFlags & 0x40) != 0 # VARFLAG_FHIDDEN
map[names[0]] = MapEntry(tuple(fdesc), names, doc, resultCLSID, resultDoc, hidden)
return (names[0],map)
else:
return None
def Build(self, typeinfo, attr, bForUser = 1):
self.clsid = attr[0]
self.bIsDispatch = (attr.wTypeFlags & pythoncom.TYPEFLAG_FDISPATCHABLE) != 0
if typeinfo is None: return
# Loop over all methods
for j in xrange(attr[6]):
fdesc = typeinfo.GetFuncDesc(j)
self._AddFunc_(typeinfo,fdesc,bForUser)
# Loop over all variables (ie, properties)
for j in xrange(attr[7]):
fdesc = typeinfo.GetVarDesc(j)
self._AddVar_(typeinfo,fdesc,bForUser)
# Now post-process the maps. For any "Get" or "Set" properties
# that have arguments, we must turn them into methods. If a method
# of the same name already exists, change the name.
for key, item in self.propMapGet.items():
self._propMapGetCheck_(key,item)
for key, item in self.propMapPut.items():
self._propMapPutCheck_(key,item)
def CountInOutOptArgs(self, argTuple):
"Return tuple counting in/outs/OPTS. Sum of result may not be len(argTuple), as some args may be in/out."
ins = out = opts = 0
for argCheck in argTuple:
inOut = argCheck[1]
if inOut==0:
ins = ins + 1
out = out + 1
else:
if inOut & pythoncom.PARAMFLAG_FIN:
ins = ins + 1
if inOut & pythoncom.PARAMFLAG_FOPT:
opts = opts + 1
if inOut & pythoncom.PARAMFLAG_FOUT:
out = out + 1
return ins, out, opts
def MakeFuncMethod(self, entry, name, bMakeClass = 1):
# If we have a type description, and not varargs...
if entry.desc is not None and (len(entry.desc) < 6 or entry.desc[6]!=-1):
return self.MakeDispatchFuncMethod(entry, name, bMakeClass)
else:
return self.MakeVarArgsFuncMethod(entry, name, bMakeClass)
def MakeDispatchFuncMethod(self, entry, name, bMakeClass = 1):
fdesc = entry.desc
doc = entry.doc
names = entry.names
ret = []
if bMakeClass:
linePrefix = "\t"
defNamedOptArg = "defaultNamedOptArg"
defNamedNotOptArg = "defaultNamedNotOptArg"
defUnnamedArg = "defaultUnnamedArg"
else:
linePrefix = ""
defNamedOptArg = "pythoncom.Missing"
defNamedNotOptArg = "pythoncom.Missing"
defUnnamedArg = "pythoncom.Missing"
defOutArg = "pythoncom.Missing"
id = fdesc[0]
s = linePrefix + 'def ' + name + '(self' + BuildCallList(fdesc, names, defNamedOptArg, defNamedNotOptArg, defUnnamedArg, defOutArg) + '):'
ret.append(s)
if doc and doc[1]:
ret.append(linePrefix + '\t' + _safeQuotedString(doc[1]))
# print "fdesc is ", fdesc
resclsid = entry.GetResultCLSID()
if resclsid:
resclsid = "'%s'" % resclsid
else:
resclsid = 'None'
# Strip the default values from the arg desc
retDesc = fdesc[8][:2]
argsDesc = tuple(map(lambda what: what[:2], fdesc[2]))
# The runtime translation of the return types is expensive, so when we know the
# return type of the function, there is no need to check the type at runtime.
# To qualify, this function must return a "simple" type, and have no byref args.
# Check if we have byrefs or anything in the args which mean we still need a translate.
param_flags = map(lambda what: what[1], fdesc[2])
bad_params = filter(lambda flag: flag & (pythoncom.PARAMFLAG_FOUT | pythoncom.PARAMFLAG_FRETVAL)!=0, param_flags)
s = None
if len(bad_params)==0 and len(retDesc)==2 and retDesc[1]==0:
rd = retDesc[0]
if NoTranslateMap.has_key(rd):
s = '%s\treturn self._oleobj_.InvokeTypes(%d, LCID, %s, %s, %s%s)' % (linePrefix, id, fdesc[4], retDesc, argsDesc, _BuildArgList(fdesc, names))
elif rd in [pythoncom.VT_DISPATCH, pythoncom.VT_UNKNOWN]:
s = '%s\tret = self._oleobj_.InvokeTypes(%d, LCID, %s, %s, %s%s)\n' % (linePrefix, id, fdesc[4], retDesc, `argsDesc`, _BuildArgList(fdesc, names))
s = s + '%s\tif ret is not None:\n' % (linePrefix,)
if rd == pythoncom.VT_UNKNOWN:
s = s + "%s\t\t# See if this IUnknown is really an IDispatch\n" % (linePrefix,)
s = s + "%s\t\ttry:\n" % (linePrefix,)
s = s + "%s\t\t\tret = ret.QueryInterface(pythoncom.IID_IDispatch)\n" % (linePrefix,)
s = s + "%s\t\texcept pythoncom.error:\n" % (linePrefix,)
s = s + "%s\t\t\treturn ret\n" % (linePrefix,)
s = s + '%s\t\tret = Dispatch(ret, %s, %s, UnicodeToString=%d)\n' % (linePrefix,`name`, resclsid, NeedUnicodeConversions)
s = s + '%s\treturn ret' % (linePrefix)
elif rd == pythoncom.VT_BSTR:
if NeedUnicodeConversions:
s = "%s\t# Result is a Unicode object - perform automatic string conversion\n" % (linePrefix,)
s = s + '%s\treturn str(self._oleobj_.InvokeTypes(%d, LCID, %s, %s, %s%s))' % (linePrefix, id, fdesc[4], retDesc, `argsDesc`, _BuildArgList(fdesc, names))
else:
s = "%s\t# Result is a Unicode object - return as-is for this version of Python\n" % (linePrefix,)
s = s + '%s\treturn self._oleobj_.InvokeTypes(%d, LCID, %s, %s, %s%s)' % (linePrefix, id, fdesc[4], retDesc, `argsDesc`, _BuildArgList(fdesc, names))
# else s remains None
if s is None:
s = '%s\treturn self._ApplyTypes_(%d, %s, %s, %s, %s, %s%s)' % (linePrefix, id, fdesc[4], retDesc, argsDesc, `name`, resclsid, _BuildArgList(fdesc, names))
ret.append(s)
ret.append("")
return ret
def MakeVarArgsFuncMethod(self, entry, name, bMakeClass = 1):
fdesc = entry.desc
names = entry.names
doc = entry.doc
ret = []
argPrefix = "self"
if bMakeClass:
linePrefix = "\t"
else:
linePrefix = ""
ret.append(linePrefix + 'def ' + name + '(' + argPrefix + ', *args):')
if doc and doc[1]: ret.append(linePrefix + '\t' + _safeQuotedString(doc[1]))
if fdesc:
invoketype = fdesc[4]
else:
invoketype = pythoncom.DISPATCH_METHOD
s = linePrefix + '\treturn self._get_good_object_(self._oleobj_.Invoke(*(('
ret.append(s + str(entry.dispid) + ",0,%d,1)+args)),'%s')" % (invoketype, names[0]))
ret.append("")
return ret
# Note - "DispatchItem" poorly named - need a new intermediate class.
class VTableItem(DispatchItem):
def Build(self, typeinfo, attr, bForUser = 1):
DispatchItem.Build(self, typeinfo, attr, bForUser)
assert typeinfo is not None, "Cant build vtables without type info!"
def cmp_vtable_off(m1, m2):
return cmp(m1.desc[7], m2.desc[7])
meth_list = self.mapFuncs.values() + self.propMapGet.values() + self.propMapPut.values()
meth_list.sort( cmp_vtable_off )
# Now turn this list into the run-time representation
# (ready for immediate use or writing to gencache)
self.vtableFuncs = []
for entry in meth_list:
self.vtableFuncs.append( (entry.names, entry.dispid, entry.desc) )
# A Lazy dispatch item - builds an item on request using info from
# an ITypeComp. The dynamic module makes the called to build each item,
# and also holds the references to the typeinfo and typecomp.
class LazyDispatchItem(DispatchItem):
typename = "LazyDispatchItem"
def __init__(self, attr, doc):
self.clsid = attr[0]
DispatchItem.__init__(self, None, attr, doc, 0)
typeSubstMap = {
pythoncom.VT_INT: pythoncom.VT_I4,
pythoncom.VT_UINT: pythoncom.VT_I4,
pythoncom.VT_HRESULT: pythoncom.VT_I4,
}
def _ResolveType(typerepr, itypeinfo):
# Resolve VT_USERDEFINED (often aliases or typed IDispatches)
if type(typerepr)==types.TupleType:
indir_vt, subrepr = typerepr
if indir_vt == pythoncom.VT_PTR:
# If it is a VT_PTR to a VT_USERDEFINED that is an IDispatch/IUnknown,
# then it resolves to simply the object.
# Otherwise, it becomes a ByRef of the resolved type
# We need to drop an indirection level on pointer to user defined interfaces.
# eg, (VT_PTR, (VT_USERDEFINED, somehandle)) needs to become VT_DISPATCH
# only when "somehandle" is an object.
			# but (VT_PTR, (VT_USERDEFINED, otherhandle)) doesn't get the indirection dropped.
was_user = type(subrepr)==types.TupleType and subrepr[0]==pythoncom.VT_USERDEFINED
subrepr, sub_clsid, sub_doc = _ResolveType(subrepr, itypeinfo)
if was_user and subrepr in [pythoncom.VT_DISPATCH, pythoncom.VT_UNKNOWN, pythoncom.VT_RECORD]:
# Drop the VT_PTR indirection
return subrepr, sub_clsid, sub_doc
# Change PTR indirection to byref
return subrepr | pythoncom.VT_BYREF, sub_clsid, sub_doc
if indir_vt == pythoncom.VT_SAFEARRAY:
# resolve the array element, and convert to VT_ARRAY
subrepr, sub_clsid, sub_doc = _ResolveType(subrepr, itypeinfo)
return pythoncom.VT_ARRAY | subrepr, sub_clsid, sub_doc
if indir_vt == pythoncom.VT_CARRAY: # runtime has no support for this yet.
# resolve the array element, and convert to VT_CARRAY
# sheesh - return _something_
return pythoncom.VT_CARRAY, None, None
if indir_vt == pythoncom.VT_USERDEFINED:
try:
resultTypeInfo = itypeinfo.GetRefTypeInfo(subrepr)
except pythoncom.com_error, details:
if details[0] in [winerror.TYPE_E_CANTLOADLIBRARY,
winerror.TYPE_E_LIBNOTREGISTERED]:
# an unregistered interface
return pythoncom.VT_UNKNOWN, None, None
raise
resultAttr = resultTypeInfo.GetTypeAttr()
typeKind = resultAttr.typekind
if typeKind == pythoncom.TKIND_ALIAS:
tdesc = resultAttr.tdescAlias
return _ResolveType(tdesc, resultTypeInfo)
elif typeKind in [pythoncom.TKIND_ENUM, pythoncom.TKIND_MODULE]:
# For now, assume Long
return pythoncom.VT_I4, None, None
elif typeKind == pythoncom.TKIND_DISPATCH:
clsid = resultTypeInfo.GetTypeAttr()[0]
retdoc = resultTypeInfo.GetDocumentation(-1)
return pythoncom.VT_DISPATCH, clsid, retdoc
elif typeKind in [pythoncom.TKIND_INTERFACE,
pythoncom.TKIND_COCLASS]:
# XXX - should probably get default interface for CO_CLASS???
clsid = resultTypeInfo.GetTypeAttr()[0]
retdoc = resultTypeInfo.GetDocumentation(-1)
return pythoncom.VT_UNKNOWN, clsid, retdoc
elif typeKind == pythoncom.TKIND_RECORD:
return pythoncom.VT_RECORD, None, None
raise NotSupportedException("Can not resolve alias or user-defined type")
return typeSubstMap.get(typerepr,typerepr), None, None
def _BuildArgList(fdesc, names):
"Builds list of args to the underlying Invoke method."
# Word has TypeInfo for Insert() method, but says "no args"
numArgs = max(fdesc[6], len(fdesc[2]))
names = list(names)
while None in names:
i = names.index(None)
names[i] = "arg%d" % (i,)
names = map(MakePublicAttributeName, names[1:])
name_num = 0
while len(names) < numArgs:
names.append("arg%d" % (len(names),))
# As per BuildCallList(), avoid huge lines.
# Hack a "\n" at the end of every 5th name - "strides" would be handy
# here but don't exist in 2.2
for i in range(0, len(names), 5):
names[i] = names[i] + "\n\t\t\t"
return "," + string.join(names, ", ")
valid_identifier_chars = string.ascii_letters + string.digits + "_"
def demunge_leading_underscores(className):
i = 0
while className[i] == "_":
i += 1
assert i >= 2, "Should only be here with names starting with '__'"
return className[i-1:] + className[:i-1]
# Given a "public name" (eg, the name of a class, function, etc)
# make sure it is a legal (and reasonable!) Python name.
def MakePublicAttributeName(className, is_global = False):
# Given a class attribute that needs to be public, convert it to a
# reasonable name.
	# Also need to be careful that the munging doesn't
# create duplicates - eg, just removing a leading "_" is likely to cause
# a clash.
# if is_global is True, then the name is a global variable that may
# overwrite a builtin - eg, "None"
if className[:2]=='__':
return demunge_leading_underscores(className)
elif iskeyword(className): # all keywords are lower case
return string.capitalize(className)
elif className == 'None':
# assign to None is evil (and SyntaxError in 2.4) - note
# that if it was a global it would get picked up below
className = 'NONE'
elif is_global and __builtins__.has_key(className):
# builtins may be mixed case. If capitalizing it doesn't change it,
# force to all uppercase (eg, "None", "True" become "NONE", "TRUE"
ret = className.capitalize()
if ret==className: # didn't change - force all uppercase.
ret = ret.upper()
return ret
# Strip non printable chars
return filter( lambda char: char in valid_identifier_chars, className)
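# Illustrative helper (not part of the original pywin32 sources): prints a few
# typical manglings performed by MakePublicAttributeName above. The sample
# names are arbitrary.
def _DemoPublicNames():
    samples = ["Count", "class", "__id", "None", "with space"]
    for s in samples:
        print s, "->", MakePublicAttributeName(s)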
# Given a default value passed by a type library, return a string with
# an appropriate repr() for the type.
# Takes a raw ELEMDESC and returns a repr string, or None
# (NOTE: The string itself may be '"None"', which is valid, and different from None.)
# XXX - To do: Dates are probably screwed, but can they come in?
def MakeDefaultArgRepr(defArgVal):
try:
inOut = defArgVal[1]
except IndexError:
# something strange - assume is in param.
inOut = pythoncom.PARAMFLAG_FIN
if inOut & pythoncom.PARAMFLAG_FHASDEFAULT:
# hack for Unicode until it repr's better.
val = defArgVal[2]
if type(val) is UnicodeType:
return repr(str(val))
elif type(val) is TimeType:
year=val.year; month=val.month; day=val.day; hour=val.hour; minute=val.minute; second=val.second; msec=val.msec
return "pythoncom.MakeTime((%(year)d, %(month)d, %(day)d, %(hour)d, %(minute)d, %(second)d,0,0,0,%(msec)d))" % locals()
else:
return repr(val)
return None
def BuildCallList(fdesc, names, defNamedOptArg, defNamedNotOptArg, defUnnamedArg, defOutArg, is_comment = False):
"Builds a Python declaration for a method."
# Names[0] is the func name - param names are from 1.
numArgs = len(fdesc[2])
numOptArgs = fdesc[6]
strval = ''
if numOptArgs==-1: # Special value that says "var args after here"
firstOptArg = numArgs
numArgs = numArgs - 1
else:
firstOptArg = numArgs - numOptArgs
for arg in xrange(numArgs):
try:
argName = names[arg+1]
namedArg = argName is not None
except IndexError:
namedArg = 0
if not namedArg: argName = "arg%d" % (arg)
thisdesc = fdesc[2][arg]
# See if the IDL specified a default value
defArgVal = MakeDefaultArgRepr(thisdesc)
if defArgVal is None:
# Out params always get their special default
if thisdesc[1] & (pythoncom.PARAMFLAG_FOUT | pythoncom.PARAMFLAG_FIN) == pythoncom.PARAMFLAG_FOUT:
defArgVal = defOutArg
else:
# Unnamed arg - always allow default values.
if namedArg:
# Is a named argument
if arg >= firstOptArg:
defArgVal = defNamedOptArg
else:
defArgVal = defNamedNotOptArg
else:
defArgVal = defUnnamedArg
argName = MakePublicAttributeName(argName)
# insanely long lines with an 'encoding' flag crashes python 2.4.0
# keep 5 args per line
# This may still fail if the arg names are insane, but that seems
# unlikely. See also _BuildArgList()
if (arg+1) % 5 == 0:
strval = strval + "\n"
if is_comment:
strval = strval + "#"
strval = strval + "\t\t\t"
strval = strval + ", " + argName
if defArgVal:
strval = strval + "=" + defArgVal
if numOptArgs==-1:
strval = strval + ", *" + names[-1]
return strval
if __name__=='__main__':
print "Use 'makepy.py' to generate Python code - this module is just a helper"
| epl-1.0 | 213,108,093,031,669,860 | 36.254808 | 159 | 0.660718 | false |
deepmind/surface-distance | surface_distance_test.py | 1 | 14199 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple tests for surface metric computations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import google3
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import surface_distance
from surface_distance.surface_distance import metrics
class SurfaceDistanceTest(parameterized.TestCase, absltest.TestCase):
def _assert_almost_equal(self, expected, actual, places):
"""Assertion wrapper correctly handling NaN equality."""
if np.isnan(expected) and np.isnan(actual):
return
self.assertAlmostEqual(expected, actual, places)
def _assert_metrics(self,
surface_distances, mask_gt, mask_pred,
expected_average_surface_distance,
expected_hausdorff_100,
expected_hausdorff_95,
expected_surface_overlap_at_1mm,
expected_surface_dice_at_1mm,
expected_volumetric_dice,
places=3):
actual_average_surface_distance = (
surface_distance.compute_average_surface_distance(surface_distances))
for i in range(2):
self._assert_almost_equal(
expected_average_surface_distance[i],
actual_average_surface_distance[i],
places=places)
self._assert_almost_equal(
expected_hausdorff_100,
surface_distance.compute_robust_hausdorff(surface_distances, 100),
places=places)
self._assert_almost_equal(
expected_hausdorff_95,
surface_distance.compute_robust_hausdorff(surface_distances, 95),
places=places)
actual_surface_overlap_at_1mm = (
surface_distance.compute_surface_overlap_at_tolerance(
surface_distances, tolerance_mm=1))
for i in range(2):
self._assert_almost_equal(
expected_surface_overlap_at_1mm[i],
actual_surface_overlap_at_1mm[i],
places=places)
self._assert_almost_equal(
expected_surface_dice_at_1mm,
surface_distance.compute_surface_dice_at_tolerance(
surface_distances, tolerance_mm=1),
places=places)
self._assert_almost_equal(
expected_volumetric_dice,
surface_distance.compute_dice_coefficient(mask_gt, mask_pred),
places=places)
@parameterized.parameters((
np.zeros([2, 2, 2], dtype=np.bool),
np.zeros([2, 2], dtype=np.bool),
[1, 1],
), (
np.zeros([2, 2], dtype=np.bool),
np.zeros([2, 2, 2], dtype=np.bool),
[1, 1],
), (
np.zeros([2, 2], dtype=np.bool),
np.zeros([2, 2], dtype=np.bool),
[1, 1, 1],
))
def test_compute_surface_distances_raises_on_incompatible_shapes(
self, mask_gt, mask_pred, spacing_mm):
with self.assertRaisesRegex(ValueError,
'The arguments must be of compatible shape'):
surface_distance.compute_surface_distances(mask_gt, mask_pred, spacing_mm)
@parameterized.parameters((
np.zeros([2], dtype=np.bool),
np.zeros([2], dtype=np.bool),
[1],
), (
np.zeros([2, 2, 2, 2], dtype=np.bool),
np.zeros([2, 2, 2, 2], dtype=np.bool),
[1, 1, 1, 1],
))
def test_compute_surface_distances_raises_on_invalid_shapes(
self, mask_gt, mask_pred, spacing_mm):
with self.assertRaisesRegex(ValueError,
'Only 2D and 3D masks are supported'):
surface_distance.compute_surface_distances(mask_gt, mask_pred, spacing_mm)
class SurfaceDistance2DTest(SurfaceDistanceTest, parameterized.TestCase):
def test_on_2_pixels_2mm_away(self):
mask_gt = np.zeros((128, 128), np.bool)
mask_pred = np.zeros((128, 128), np.bool)
mask_gt[50, 70] = 1
mask_pred[50, 72] = 1
surface_distances = surface_distance.compute_surface_distances(
mask_gt, mask_pred, spacing_mm=(2, 1))
diag = 0.5 * math.sqrt(2**2 + 1**2)
expected_distances = {
'surfel_areas_gt': np.asarray([diag, diag, diag, diag]),
'surfel_areas_pred': np.asarray([diag, diag, diag, diag]),
'distances_gt_to_pred': np.asarray([1., 1., 2., 2.]),
'distances_pred_to_gt': np.asarray([1., 1., 2., 2.]),
}
self.assertEqual(len(expected_distances), len(surface_distances))
for key, expected_value in expected_distances.items():
np.testing.assert_array_equal(expected_value, surface_distances[key])
self._assert_metrics(
surface_distances,
mask_gt,
mask_pred,
expected_average_surface_distance=(1.5, 1.5),
expected_hausdorff_100=2.0,
expected_hausdorff_95=2.0,
expected_surface_overlap_at_1mm=(0.5, 0.5),
expected_surface_dice_at_1mm=0.5,
expected_volumetric_dice=0.0)
def test_two_squares_shifted_by_one_pixel(self):
    # We make sure we do not have active pixels on the border of the image,
    # because that would add additional 2D surfaces at the image border,
    # since the image is padded with background.
mask_gt = np.asarray(
[
[0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype=np.bool)
mask_pred = np.asarray(
[
[0, 0, 0, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
],
dtype=np.bool)
vertical = 2
horizontal = 1
diag = 0.5 * math.sqrt(horizontal**2 + vertical**2)
surface_distances = surface_distance.compute_surface_distances(
mask_gt, mask_pred, spacing_mm=(vertical, horizontal))
    # We go from the top-left corner, clockwise, to describe the surfaces and
    # distances. The 2 surfaces are:
#
# /-\ /-\
# | | | |
# \-/ | |
# \-/
expected_surfel_areas_gt = np.asarray(
[diag, horizontal, diag, vertical, diag, horizontal, diag, vertical])
expected_surfel_areas_pred = np.asarray([
diag, horizontal, diag, vertical, vertical, diag, horizontal, diag,
vertical, vertical
])
expected_distances_gt_to_pred = np.asarray([0] * 5 + [horizontal] + [0] * 2)
expected_distances_pred_to_gt = np.asarray([0] * 5 + [vertical] * 3 +
[0] * 2)
    # We sort these using the same sorting algorithm as the implementation
(expected_distances_gt_to_pred, expected_surfel_areas_gt) = (
metrics._sort_distances_surfels(expected_distances_gt_to_pred,
expected_surfel_areas_gt))
(expected_distances_pred_to_gt, expected_surfel_areas_pred) = (
metrics._sort_distances_surfels(expected_distances_pred_to_gt,
expected_surfel_areas_pred))
expected_distances = {
'surfel_areas_gt': expected_surfel_areas_gt,
'surfel_areas_pred': expected_surfel_areas_pred,
'distances_gt_to_pred': expected_distances_gt_to_pred,
'distances_pred_to_gt': expected_distances_pred_to_gt,
}
self.assertEqual(len(expected_distances), len(surface_distances))
for key, expected_value in expected_distances.items():
np.testing.assert_array_equal(expected_value, surface_distances[key])
self._assert_metrics(
surface_distances,
mask_gt,
mask_pred,
expected_average_surface_distance=(
surface_distance.compute_average_surface_distance(
expected_distances)),
expected_hausdorff_100=(surface_distance.compute_robust_hausdorff(
expected_distances, 100)),
expected_hausdorff_95=surface_distance.compute_robust_hausdorff(
expected_distances, 95),
expected_surface_overlap_at_1mm=(
surface_distance.compute_surface_overlap_at_tolerance(
expected_distances, tolerance_mm=1)),
expected_surface_dice_at_1mm=(
surface_distance.compute_surface_dice_at_tolerance(
surface_distances, tolerance_mm=1)),
expected_volumetric_dice=(surface_distance.compute_dice_coefficient(
mask_gt, mask_pred)))
def test_empty_prediction_mask(self):
mask_gt = np.zeros((128, 128), np.bool)
mask_pred = np.zeros((128, 128), np.bool)
mask_gt[50, 60] = 1
surface_distances = surface_distance.compute_surface_distances(
mask_gt, mask_pred, spacing_mm=(3, 2))
self._assert_metrics(
surface_distances,
mask_gt,
mask_pred,
expected_average_surface_distance=(np.inf, np.nan),
expected_hausdorff_100=np.inf,
expected_hausdorff_95=np.inf,
expected_surface_overlap_at_1mm=(0.0, np.nan),
expected_surface_dice_at_1mm=0.0,
expected_volumetric_dice=0.0)
def test_empty_ground_truth_mask(self):
mask_gt = np.zeros((128, 128), np.bool)
mask_pred = np.zeros((128, 128), np.bool)
mask_pred[50, 60] = 1
surface_distances = surface_distance.compute_surface_distances(
mask_gt, mask_pred, spacing_mm=(3, 2))
self._assert_metrics(
surface_distances,
mask_gt,
mask_pred,
expected_average_surface_distance=(np.nan, np.inf),
expected_hausdorff_100=np.inf,
expected_hausdorff_95=np.inf,
expected_surface_overlap_at_1mm=(np.nan, 0.0),
expected_surface_dice_at_1mm=0.0,
expected_volumetric_dice=0.0)
def test_both_empty_masks(self):
mask_gt = np.zeros((128, 128), np.bool)
mask_pred = np.zeros((128, 128), np.bool)
surface_distances = surface_distance.compute_surface_distances(
mask_gt, mask_pred, spacing_mm=(3, 2))
self._assert_metrics(
surface_distances,
mask_gt,
mask_pred,
expected_average_surface_distance=(np.nan, np.nan),
expected_hausdorff_100=np.inf,
expected_hausdorff_95=np.inf,
expected_surface_overlap_at_1mm=(np.nan, np.nan),
expected_surface_dice_at_1mm=np.nan,
expected_volumetric_dice=np.nan)
class SurfaceDistance3DTest(SurfaceDistanceTest):
def test_on_2_pixels_2mm_away(self):
mask_gt = np.zeros((128, 128, 128), np.bool)
mask_pred = np.zeros((128, 128, 128), np.bool)
mask_gt[50, 60, 70] = 1
mask_pred[50, 60, 72] = 1
surface_distances = surface_distance.compute_surface_distances(
mask_gt, mask_pred, spacing_mm=(3, 2, 1))
self._assert_metrics(surface_distances, mask_gt, mask_pred,
expected_average_surface_distance=(1.5, 1.5),
expected_hausdorff_100=2.0,
expected_hausdorff_95=2.0,
expected_surface_overlap_at_1mm=(0.5, 0.5),
expected_surface_dice_at_1mm=0.5,
expected_volumetric_dice=0.0)
def test_two_cubes_shifted_by_one_pixel(self):
mask_gt = np.zeros((100, 100, 100), np.bool)
mask_pred = np.zeros((100, 100, 100), np.bool)
mask_gt[0:50, :, :] = 1
mask_pred[0:51, :, :] = 1
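    # the prediction extends one extra slice along the first axis (2 mm spacing)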
surface_distances = surface_distance.compute_surface_distances(
mask_gt, mask_pred, spacing_mm=(2, 1, 1))
self._assert_metrics(
surface_distances, mask_gt, mask_pred,
expected_average_surface_distance=(0.322, 0.339),
expected_hausdorff_100=2.0,
expected_hausdorff_95=2.0,
expected_surface_overlap_at_1mm=(0.842, 0.830),
expected_surface_dice_at_1mm=0.836,
expected_volumetric_dice=0.990)
def test_empty_prediction_mask(self):
mask_gt = np.zeros((128, 128, 128), np.bool)
mask_pred = np.zeros((128, 128, 128), np.bool)
mask_gt[50, 60, 70] = 1
surface_distances = surface_distance.compute_surface_distances(
mask_gt, mask_pred, spacing_mm=(3, 2, 1))
self._assert_metrics(
surface_distances, mask_gt, mask_pred,
expected_average_surface_distance=(np.inf, np.nan),
expected_hausdorff_100=np.inf,
expected_hausdorff_95=np.inf,
expected_surface_overlap_at_1mm=(0.0, np.nan),
expected_surface_dice_at_1mm=0.0,
expected_volumetric_dice=0.0)
def test_empty_ground_truth_mask(self):
mask_gt = np.zeros((128, 128, 128), np.bool)
mask_pred = np.zeros((128, 128, 128), np.bool)
mask_pred[50, 60, 72] = 1
surface_distances = surface_distance.compute_surface_distances(
mask_gt, mask_pred, spacing_mm=(3, 2, 1))
self._assert_metrics(
surface_distances, mask_gt, mask_pred,
expected_average_surface_distance=(np.nan, np.inf),
expected_hausdorff_100=np.inf,
expected_hausdorff_95=np.inf,
expected_surface_overlap_at_1mm=(np.nan, 0.0),
expected_surface_dice_at_1mm=0.0,
expected_volumetric_dice=0.0)
def test_both_empty_masks(self):
mask_gt = np.zeros((128, 128, 128), np.bool)
mask_pred = np.zeros((128, 128, 128), np.bool)
surface_distances = surface_distance.compute_surface_distances(
mask_gt, mask_pred, spacing_mm=(3, 2, 1))
self._assert_metrics(
surface_distances, mask_gt, mask_pred,
expected_average_surface_distance=(np.nan, np.nan),
expected_hausdorff_100=np.inf,
expected_hausdorff_95=np.inf,
expected_surface_overlap_at_1mm=(np.nan, np.nan),
expected_surface_dice_at_1mm=np.nan,
expected_volumetric_dice=np.nan)
if __name__ == '__main__':
absltest.main()
| apache-2.0 | 645,911,862,099,925,400 | 37.479675 | 80 | 0.613423 | false |
osmanbaskaya/mapping-impact | run/pw-score-perp-calc.py | 1 | 1989 | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Osman Baskaya"
"""
This module calculates F1-Score and sense perplexity of each pseudoword for a given
classifier
"""
import sys
import os
from tempfile import NamedTemporaryFile
from collections import defaultdict as dd
from multiprocessing import Pool
from subprocess import Popen, PIPE
from itertools import izip
from nlp_utils import fopen, calc_perp
key_file = sys.argv[1]
gold_file = sys.argv[2]
out_path = sys.argv[3]
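# derive a flat output file name from the key file path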
base = key_file.replace("../keys/", '').replace('/', '__').replace(".ans.gz", "")
print >> sys.stderr, base
score_exec = "../keys/scorer.py"
def calc_score(t):
system_f, gold_f = t
p1 = Popen([score_exec, system_f, gold_f], stdout=PIPE)
s = Popen(['grep', 'F1-Score'], stdin=p1.stdout, stdout=PIPE).communicate()[0]
return s.replace("F1-Score is", "").strip()
tw_dict = dd(list)
perp_dict = dd(list)
for line in fopen(key_file):
L = line.split()
tw, gold_tag = L[0], L[-1]
tw_dict[tw].append(line)
gold_dict = dd(list)
for line in fopen(gold_file):
L = line.split()
tw, gold_tag = L[0], L[-1]
perp_dict[tw].append(gold_tag)
gold_dict[tw].append(line)
keys = tw_dict.keys()
if len(keys) != len(gold_dict.keys()):
print >> sys.stderr, "pseudoword numbers not equal between system and gold keys"
perplexities = dict()
files = []
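# Write each pseudoword's system and gold answers to temporary files so that the
# external scorer can evaluate every pseudoword independently (in parallel below).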
for key in keys:
perplexities[key] = calc_perp(perp_dict[key])
f_system = NamedTemporaryFile(mode='w', delete=False)
f_gold = NamedTemporaryFile(mode='w', delete=False)
f_system.write(''.join(tw_dict[key]))
f_gold.write(''.join(gold_dict[key]))
f_system.flush()
f_gold.flush()
files.append((f_system.name, f_gold.name))
pool = Pool(processes=20)
scores = pool.map(calc_score, files)
output = []
for tw, score in izip(keys, scores):
output.append("{}\t{}\t{}".format(tw, score, perplexities[tw]))
with open(os.path.join(out_path, base), 'w') as fn:
fn.write('\n'.join(output))
| mit | -196,228,552,919,871,800 | 25.52 | 84 | 0.655103 | false |
yajiedesign/mxnet | tests/python/unittest/test_subgraph_op.py | 2 | 25902 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import ctypes
import mxnet as mx
from mxnet.base import SymbolHandle, check_call, _LIB, mx_uint, c_str_array, c_str, mx_real_t
from mxnet.symbol import Symbol
import numpy as np
from mxnet.test_utils import assert_almost_equal, environment
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import nd
import pytest
import tempfile
def network_structure_1():
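    # 'data2' is used both as the weight of conv1 and as the input of conv2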
data1 = mx.sym.var('data1', shape=(2, 3, 10, 10))
data2 = mx.sym.var('data2')
conv1 = mx.sym.Convolution(data=data1, weight=data2, no_bias=True, kernel=(2, 2), num_filter=1)
conv2 = mx.sym.Convolution(data=data2, no_bias=True, kernel=(1, 1), num_filter=1)
out = mx.sym.Group([conv1, conv2])
return (out, ['data1'], [(2, 3, 10, 10)])
def network_structure_2():
# this tests whether the partitioning algorithm can deal with cycles
data = mx.sym.var('data', shape=(2, 3, 10, 10))
ret = mx.sym.exp(data)
ret1 = mx.sym.cos(ret)
ret2 = mx.sym.sin(ret)
ret = ret1 + ret2
return (ret, ['data'], [(2, 3, 10, 10)])
def network_structure_3():
# this tests whether the partitioned sym can distinguish in_args and aux_states
data = mx.sym.var('data', shape=(2, 3, 10, 10))
ret = mx.sym.exp(data)
ret1 = mx.sym.cos(ret)
ret2 = mx.sym.sin(ret)
ret = ret1 + ret2
ret = mx.sym.BatchNorm(ret)
ret = mx.sym.BatchNorm(ret)
    # Return the names and shapes of 'data' and the auxiliary states
return (ret, ['data'] + ret.list_auxiliary_states(), [(2, 3, 10, 10), (3,), (3,), (3,), (3,)])
def network_structure_4():
# the last op has multiple duplicate outputs
data = mx.sym.var('data', shape=(2, 3, 10, 10))
ret = mx.sym.exp(data)
ret = mx.sym.Group([ret, ret, ret])
return (ret, ['data'], [(2, 3, 10, 10)])
def network_structure_5():
# the subgraph has two duplicate input entries
data = mx.sym.var('data', shape=(2, 3, 10, 10))
ret = data + data
return (ret, ['data'], [(2, 3, 10, 10)])
def network_structure_6():
data1 = mx.sym.Variable('data1', shape=(3, 3, 10, 10), dtype=np.float32)
data2 = mx.sym.Variable('data2', shape=(1, 0, 2, 2))
data3 = mx.sym.sin(data2)
conv = mx.sym.Convolution(data=data1, weight=data3, kernel=(2, 2), num_filter=1)
return (conv, ['data1'], [(3, 3, 10, 10)])
def network_structure_7():
# in this graph, the subgraph node and the other two external nodes form a cycle
data = mx.sym.Variable('data', shape=(1,))
ret1 = mx.sym.sin(data)
ret2 = mx.sym.cos(ret1)
for _ in range(5):
ret2 = mx.sym.cos(ret2)
ret = ret1 + ret2
return (ret, ['data'], [(1,)])
def network_structure_8():
    # in this graph, two nodes in the subgraph consume the same input,
    # and two nodes outside the subgraph consume a single output from the subgraph
data = mx.sym.Variable('data', shape=(1,))
sin1 = mx.sym.sin(data)
sin2 = mx.sym.sin(data)
plus = sin1 + sin2
ret1 = mx.sym.cos(plus)
ret2 = mx.sym.cos(plus)
ret = ret1 - ret2
return (ret, ['data'], [(1,)])
def get_graphs():
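    # each entry pairs a (symbol, input names, input shapes) tuple with the op
    # names used to build the subgraphs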
return [
(network_structure_1(), ['Convolution']),
(network_structure_2(), ['exp', 'sin', '_Plus', 'elemwise_add', '_plus']),
(network_structure_2(), ['exp', 'cos', '_Plus', 'elemwise_add', '_plus']),
(network_structure_3(), ['exp', 'sin', '_Plus', 'elemwise_add', '_plus']),
(network_structure_3(), ['exp', 'cos', '_Plus', 'elemwise_add', '_plus']),
(network_structure_3(), ['exp', 'sin', '_Plus', 'elemwise_add', '_plus', 'BatchNorm']),
(network_structure_3(), ['exp', 'cos', '_Plus', 'elemwise_add', '_plus', 'BatchNorm']),
(network_structure_3(), ['exp', 'BatchNorm']),
(network_structure_3(), ['BatchNorm']),
(network_structure_4(), ['exp']),
(network_structure_5(), ['_plus', '_Plus', 'elemwise_add']),
(network_structure_6(), []),
(network_structure_6(), [mx.sym.sin.__name__]),
(network_structure_6(), [mx.sym.Convolution.__name__]),
(network_structure_6(), [mx.sym.sin.__name__, mx.sym.Convolution.__name__]),
(network_structure_7(), ['sin', 'elemwise_add', '_plus', '_Plus']),
(network_structure_8(), ['sin', 'elemwise_add'])
]
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe1(sym, subgraph_backend, op_names):
"""Use the partitioned sym to _simple_bind an executor and compare the outputs
with those of the original executor"""
sym, _, _ = sym
out = SymbolHandle()
check_call(_LIB.MXBuildSubgraphByOpNames(sym.handle, c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names), ctypes.byref(out)))
partitioned_sym = Symbol(out)
assert partitioned_sym.list_inputs() == sym.list_inputs()
assert partitioned_sym.list_arguments() == sym.list_arguments()
assert partitioned_sym.list_auxiliary_states() == sym.list_auxiliary_states()
exe = sym._simple_bind(ctx=mx.current_context(), grad_req='null')
partitioned_exe = partitioned_sym._simple_bind(ctx=mx.current_context(), grad_req='null')
input_names = sym.list_inputs()
for name in input_names:
if name in exe.arg_dict:
exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)
partitioned_exe.arg_dict[name][:] = exe.arg_dict[name]
else:
assert name in exe.aux_dict
exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)
partitioned_exe.aux_dict[name][:] = exe.aux_dict[name]
exe.forward()
partitioned_exe.forward()
assert len(exe.outputs) == len(partitioned_exe.outputs)
for i in range(len(exe.outputs)):
assert_almost_equal((exe.outputs[i] - partitioned_exe.outputs[i]).abs().sum().asnumpy(),
np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe2(sym, subgraph_backend, op_names):
"""Use env var MXNET_SUBGRAPH_BACKEND=default to trigger graph partitioning in _simple_bind
and compare results of the partitioned sym and the original sym."""
def get_executor(sym, subgraph_backend=None, op_names=None, original_exec=None):
exe = sym._simple_bind(ctx=mx.current_context(), grad_req='null')
input_names = sym.list_inputs()
for name in input_names:
if name in exe.arg_dict:
exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)\
if original_exec is None else original_exec.arg_dict[name]
else:
assert name in exe.aux_dict
exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)\
if original_exec is None else original_exec.aux_dict[name]
exe.forward()
return exe
sym, _, _ = sym
original_exec = get_executor(sym)
with environment('MXNET_SUBGRAPH_BACKEND', subgraph_backend):
check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
partitioned_exec = get_executor(sym, subgraph_backend, op_names, original_exec)
check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
outputs1 = original_exec.outputs
outputs2 = partitioned_exec.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe3(sym, subgraph_backend, op_names):
"""Use the partitioned sym to bind an executor and compare the outputs
with those of the original executor"""
sym, _, _ = sym
out = SymbolHandle()
check_call(_LIB.MXBuildSubgraphByOpNames(sym.handle, c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names), ctypes.byref(out)))
partitioned_sym = Symbol(out)
input_names = sym.list_inputs()
arg_names = sym.list_arguments()
aux_names = sym.list_auxiliary_states()
assert partitioned_sym.list_inputs() == input_names
assert partitioned_sym.list_arguments() == arg_names
assert partitioned_sym.list_auxiliary_states() == aux_names
arg_shapes, _, aux_shapes = sym.infer_shape()
arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
exe = sym._bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
partitioned_exe = partitioned_sym._bind(ctx=mx.current_context(), args=arg_array,
aux_states=aux_array, grad_req='null')
exe.forward()
partitioned_exe.forward()
assert len(exe.outputs) == len(partitioned_exe.outputs)
for i in range(len(exe.outputs)):
assert_almost_equal((exe.outputs[i] - partitioned_exe.outputs[i]).abs().sum().asnumpy(),
np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe4(sym, subgraph_backend, op_names):
"""Use env var MXNET_SUBGRAPH_BACKEND=default to trigger graph partitioning in bind
and compare results of the partitioned sym and the original sym."""
def get_executor(sym, subgraph_backend=None, op_names=None, original_exec=None):
arg_shapes, _, aux_shapes = sym.infer_shape()
if subgraph_backend is None:
arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
else:
arg_array = None
aux_array = None
exe = sym._bind(ctx=mx.current_context(),
args=arg_array if subgraph_backend is None else original_exec.arg_arrays,
aux_states=aux_array if subgraph_backend is None else original_exec.aux_arrays,
grad_req='null')
exe.forward()
return exe
sym, _, _ = sym
original_exec = get_executor(sym)
with environment('MXNET_SUBGRAPH_BACKEND', subgraph_backend):
check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
partitioned_exec = get_executor(sym, subgraph_backend, op_names, original_exec)
check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
outputs1 = original_exec.outputs
outputs2 = partitioned_exec.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
def set_random_inputs(exe1, input_names):
"""Sets random values to exe1's args and auxs"""
for name in input_names:
if name in exe1.arg_dict:
exe1.arg_dict[name][:] = mx.nd.random.uniform(shape=exe1.arg_dict[name].shape)
else:
assert name in exe1.aux_dict
exe1.aux_dict[name][:] = mx.nd.random.uniform(shape=exe1.aux_dict[name].shape)
def copy_inputs_between_executors(exe1, exe2, input_names):
"""Copies values of args and auxs from exe1 to exe2"""
for name in input_names:
if name in exe2.arg_dict:
exe2.arg_dict[name][:] = exe1.arg_dict[name]
else:
assert name in exe2.aux_dict
exe2.aux_dict[name][:] = exe1.aux_dict[name]
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe5(sym, subgraph_backend, op_names):
"""Call optimize_for to trigger graph partitioning without infer shapes/types before,
then _simple_bind and compare results of the partitioned sym and the original sym."""
# _simple_bind
sym, _, _ = sym
exe1 = sym._simple_bind(ctx=mx.current_context(), grad_req='null')
input_names = sym.list_inputs()
set_random_inputs(exe1, input_names)
exe1.forward()
# partition before _simple_bind
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
part_sym = sym.optimize_for(subgraph_backend)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
exe2 = part_sym._simple_bind(ctx=mx.current_context(), grad_req='null')
copy_inputs_between_executors(exe1, exe2, input_names)
exe2.forward()
# compare outputs
outputs1 = exe1.outputs
outputs2 = exe2.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe6(sym, subgraph_backend, op_names):
"""Call optimize_for to trigger graph partitioning with shapes/types, then _simple_bind
and compare results of the partitioned sym and the original sym."""
# _simple_bind
sym, _, _ = sym
exe1 = sym._simple_bind(ctx=mx.current_context(), grad_req='null')
input_names = sym.list_inputs()
set_random_inputs(exe1, input_names)
exe1.forward()
    # infer shape/type, then partition, then _simple_bind
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
part_sym = sym.optimize_for(subgraph_backend, exe1.arg_dict, exe1.aux_dict)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
exe2 = part_sym._simple_bind(ctx=mx.current_context(), grad_req='null')
copy_inputs_between_executors(exe1, exe2, input_names)
exe2.forward()
# compare outputs
outputs1 = exe1.outputs
outputs2 = exe2.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe7(sym, subgraph_backend, op_names):
"""Call optimize_for to trigger graph partitioning without infer shapes/types before,
then bind and compare results of the partitioned sym and the original sym."""
# bind
sym, _, _ = sym
arg_shapes, _, aux_shapes = sym.infer_shape()
arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
exe1 = sym._bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
exe1.forward()
# partition before bind
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
part_sym = sym.optimize_for(subgraph_backend)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
exe2 = part_sym._bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
exe2.forward()
# compare outputs
outputs1 = exe1.outputs
outputs2 = exe2.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe8(sym, subgraph_backend, op_names):
"""Call optimize_for to infer shapes, types and dtypes followed by graph partitioning,
then bind and compare results of the partitioned sym and the original sym."""
# bind
sym, _, _ = sym
arg_shapes, _, aux_shapes = sym.infer_shape()
arg_names = sym.list_arguments()
aux_names = sym.list_auxiliary_states()
arg_dict = {name:mx.nd.random.uniform(shape=shape) for name,shape in zip(arg_names,arg_shapes)}
aux_dict = {name:mx.nd.random.uniform(shape=shape) for name,shape in zip(aux_names,aux_shapes)}
exe1 = sym._bind(ctx=mx.current_context(), args=arg_dict, aux_states=aux_dict, grad_req='null')
exe1.forward()
    # infer shape/type, then partition, then bind
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
part_sym = sym.optimize_for(subgraph_backend, arg_dict, aux_dict)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
exe2 = part_sym._bind(ctx=mx.current_context(), args=arg_dict, aux_states=aux_dict, grad_req='null')
exe2.forward()
# compare outputs
outputs1 = exe1.outputs
outputs2 = exe2.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe9(sym, subgraph_backend, op_names):
"""Call optimize_for to infer shapes, types and dtypes followed by graph partitioning and
    subgraph deduplication, then bind and compare results of the partitioned sym and the original sym."""
# bind
sym, _, _ = sym
arg_shapes, _, aux_shapes = sym.infer_shape()
arg_names = sym.list_arguments()
aux_names = sym.list_auxiliary_states()
arg_dict = {name:mx.nd.random.uniform(shape=shape) for name,shape in zip(arg_names,arg_shapes)}
aux_dict = {name:mx.nd.random.uniform(shape=shape) for name,shape in zip(aux_names,aux_shapes)}
exe1 = sym._bind(ctx=mx.current_context(), args=arg_dict, aux_states=aux_dict, grad_req='null')
exe1.forward()
    # infer shape/type, then partition, then bind
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
part_sym = sym.optimize_for(subgraph_backend, arg_dict, aux_dict, dedup_subgraph=True)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
exe2 = part_sym._bind(ctx=mx.current_context(), args=arg_dict, aux_states=aux_dict, grad_req='null')
exe2.forward()
# compare outputs
outputs1 = exe1.outputs
outputs2 = exe2.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_backend_gluon(sym, subgraph_backend, op_names, tmpdir):
"""Call hybridize() to partition the graph, and then compare results of the partitioned
    sym and the original sym. Here we do an inference before hybridizing with the
    subgraph_backend, which means we'll pass shapes/types."""
# create Gluon block for given symbol
inputs = [mx.sym.var(i, dtype=mx_real_t) for i in sym[1]]
sym_block = nn.SymbolBlock(sym[0], inputs)
sym_block.initialize(ctx=mx.current_context())
x = [mx.nd.random.uniform(shape=s,ctx=mx.current_context()) for s in sym[2]]
# hybridize and export to get baseline
sym_block.hybridize()
outputs1 = sym_block(*x)
_, json_path = tempfile.mkstemp(suffix='-symbol.json', dir=str(tmpdir))
export_path = json_path.replace('-symbol.json', '')
params_path = export_path + '-0000.params'
sym_block.export(export_path)
# load model and partition
sym_block = nn.SymbolBlock.imports(json_path,sym[1], params_path,
ctx=mx.current_context())
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
sym_block.hybridize(backend=subgraph_backend)
outputs2 = sym_block(*x)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
# compare outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
# Test Gluon HybridBlocks for graph partitioning a network created by HybridSequential.
@pytest.mark.serial
def test_subgraph_backend_gluon_ext1(tmpdir):
def get_net():
net = nn.HybridSequential() # Here we use the class HybridSequential.
net.add(nn.Dense(256, activation='relu'),
nn.Dense(128, activation='relu'),
nn.Dense(2))
return net
# regular inference
x = nd.random.normal(shape=(1, 512),ctx=mx.current_context())
net = get_net()
net.initialize(ctx=mx.current_context())
outputs1 = net(x)
param_path = os.path.join(str(tmpdir), 'test_subgraph_backend_gluon_ext1.params')
net.save_parameters(param_path)
# after partitioning
net = get_net()
net.load_parameters(param_path,ctx=mx.current_context())
subgraph_backend = 'default'
op_names = ['FullyConnected']
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
net.hybridize(backend = subgraph_backend)
outputs2 = net(x)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
# compare outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
# Test Gluon HybridBlocks for graph partitioning a network created by HybridBlock.
@pytest.mark.serial
def test_subgraph_backend_gluon_ext2(tmpdir):
class Net(gluon.HybridBlock):
def __init__(self, **kwargs):
super(Net, self).__init__(**kwargs)
self.fc1 = nn.Dense(256)
self.fc2 = nn.Dense(128)
self.fc3 = nn.Dense(2)
def hybrid_forward(self, F, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return self.fc3(x)
# regular inference
x = nd.random.normal(shape=(1, 512),ctx=mx.current_context())
net = Net()
net.initialize(ctx=mx.current_context())
outputs1 = net(x)
param_path = os.path.join(str(tmpdir), 'test_subgraph_backend_gluon_ext2.params')
net.save_parameters(param_path)
# after partitioning
net = Net()
net.load_parameters(param_path, ctx=mx.current_context())
subgraph_backend = 'default'
op_names = ['FullyConnected']
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
net.hybridize(backend = subgraph_backend)
outputs2 = net(x)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
# compare outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
if __name__ == "__main__":
import datetime
tmpdir = datetime.datetime.now().strftime('mylogfile_%H_%M_%S_%f_%d_%m_%Y.log')
os.mkdir(tmpdir)
subgraph_backends = ['default', 'default_v2']
graphs = get_graphs()
for subgraph_backend in subgraph_backends:
for sym,op_names in graphs:
test_subgraph_exe1(sym, subgraph_backend, op_names)
test_subgraph_exe2(sym, subgraph_backend, op_names)
test_subgraph_exe3(sym, subgraph_backend, op_names)
test_subgraph_exe4(sym, subgraph_backend, op_names)
test_subgraph_exe5(sym, subgraph_backend, op_names)
test_subgraph_exe6(sym, subgraph_backend, op_names)
test_subgraph_exe7(sym, subgraph_backend, op_names)
test_subgraph_exe8(sym, subgraph_backend, op_names)
test_subgraph_exe9(sym, subgraph_backend, op_names)
test_subgraph_backend_gluon(sym, subgraph_backend, op_names, tmpdir)
test_subgraph_backend_gluon_ext1(tmpdir)
test_subgraph_backend_gluon_ext2(tmpdir)
| apache-2.0 | -501,251,097,016,007,550 | 46.789668 | 106 | 0.643464 | false |
gem/oq-hazardlib | openquake/hazardlib/calc/__init__.py | 1 | 1180 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Package :mod:`openquake.hazardlib.calc` contains hazard calculator modules
and utilities for them, such as :mod:`~openquake.hazardlib.calc.filters`.
"""
from openquake.hazardlib.calc.gmf import ground_motion_fields
from openquake.hazardlib.calc.stochastic import stochastic_event_set
# from disagg we want to import the main calc function
# as well as all the pmf extractors
from openquake.hazardlib.calc.disagg import *
| agpl-3.0 | -4,664,859,067,582,426,000 | 42.703704 | 74 | 0.772034 | false |
lukas-krecan/tensorflow | tensorflow/python/platform/gfile.py | 11 | 1205 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Switch between depending on pyglib.gfile or an OSS replacement."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
# pylint: disable=g-import-not-at-top
# pylint: disable=wildcard-import
import tensorflow.python.platform
from . import control_imports
if control_imports.USE_OSS and control_imports.OSS_GFILE:
from tensorflow.python.platform.default._gfile import *
else:
from tensorflow.python.platform.google._gfile import *
| apache-2.0 | 7,308,468,893,456,987,000 | 40.551724 | 80 | 0.719502 | false |
marcosfede/algorithms | adventofcode/2018/7/p2.py | 1 | 1255 | import heapq
from collections import defaultdict
import re
nodes = set()
edges = defaultdict(set)
with open('input') as f:
for line in f:
regex = r'Step (.) must be finished before step (.) can begin\.'
match = re.match(regex, line)
src, dest = match.group(1), match.group(2)
nodes.add(src)
nodes.add(dest)
edges[src].add(dest)
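# Each step takes 60 seconds plus its letter's position in the alphabet:
# ord('A') == 65, so 60 + (ord(x) - 64) == ord(x) - 4.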
def score(x): return ord(x) - 4
timeremaining = {k: score(k) for k in nodes}
# build incoming connections map
incomings = {k: 0 for k in nodes}
for src, neighbours in edges.items():
for dest in neighbours:
incomings[dest] += 1
time = 0
queue = [k for k in nodes if incomings[k] == 0]
heapq.heapify(queue)
processing = set()
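# Simulate one second per iteration: assign idle workers (5 in total) to available
# steps, tick every in-progress step, and release dependents of finished steps.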
while queue or processing:
time += 1
idle = 5 - len(processing)
if idle > 0:
for worker in range(min(idle, len(queue))):
processing.add(heapq.heappop(queue))
done = set()
for task in processing:
timeremaining[task] -= 1
if timeremaining[task] == 0:
done.add(task)
for dest in edges[task]:
incomings[dest] -= 1
if incomings[dest] == 0:
heapq.heappush(queue, dest)
processing -= done
print(time)
| gpl-3.0 | 7,448,622,851,404,552,000 | 24.1 | 72 | 0.590438 | false |
githubmlai/numpy | numpy/core/setup.py | 8 | 41303 | from __future__ import division, print_function
import imp
import os
import sys
import pickle
import copy
import warnings
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
from numpy.build_utils.apple_accelerate import uses_accelerate_framework, get_sgemv_fix
from setup_common import *
# Set to True to enable multiple file compilations (experimental)
ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
# XXX: ugly, we use a class to avoid calling some expensive functions twice when
# generating config.h/numpyconfig.h. I don't see a better way because distutils
# forces config.h generation inside an Extension class, and as such sharing
# configuration information between extensions is not easy.
# Using a pickle-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
class CallOnceOnly(object):
def __init__(self):
self._check_types = None
self._check_ieee_macros = None
self._check_complex = None
def check_types(self, *a, **kw):
if self._check_types is None:
out = check_types(*a, **kw)
self._check_types = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_types))
return out
def check_ieee_macros(self, *a, **kw):
if self._check_ieee_macros is None:
out = check_ieee_macros(*a, **kw)
self._check_ieee_macros = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
return out
def check_complex(self, *a, **kw):
if self._check_complex is None:
out = check_complex(*a, **kw)
self._check_complex = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_complex))
return out
PYTHON_HAS_UNICODE_WIDE = True
def pythonlib_dir():
"""return path where libpython* is."""
if sys.platform == 'win32':
return os.path.join(sys.prefix, "libs")
else:
return get_config_var('LIBDIR')
def is_npy_no_signal():
"""Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
header."""
return sys.platform == 'win32'
def is_npy_no_smp():
"""Return True if the NPY_NO_SMP symbol must be defined in public
header (when SMP support cannot be reliably enabled)."""
# Perhaps a fancier check is in order here.
# so that threads are only enabled if there
# are actually multiple CPUS? -- but
# threaded code can be nice even on a single
# CPU so that long-calculating code doesn't
# block.
return 'NPY_NOSMP' in os.environ
def win32_checks(deflist):
from numpy.distutils.misc_util import get_build_architecture
a = get_build_architecture()
# Distutils hack on AMD64 on windows
print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
(a, os.name, sys.platform))
if a == 'AMD64':
deflist.append('DISTUTILS_USE_SDK')
# On win32, force long double format string to be 'g', not
# 'Lg', since the MS runtime does not support long double whose
# size is > sizeof(double)
if a == "Intel" or a == "AMD64":
deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
def check_math_capabilities(config, moredefs, mathlibs):
def check_func(func_name):
return config.check_func(func_name, libraries=mathlibs,
decl=True, call=True)
def check_funcs_once(funcs_name):
decl = dict([(f, True) for f in funcs_name])
st = config.check_funcs_once(funcs_name, libraries=mathlibs,
decl=decl, call=decl)
if st:
moredefs.extend([(fname2def(f), 1) for f in funcs_name])
return st
def check_funcs(funcs_name):
# Use check_funcs_once first, and if it does not work, test func per
# func. Return success only if all the functions are available
if not check_funcs_once(funcs_name):
# Global check failed, check func per func
for f in funcs_name:
if check_func(f):
moredefs.append((fname2def(f), 1))
return 0
else:
return 1
#use_msvc = config.check_decl("_MSC_VER")
if not check_funcs_once(MANDATORY_FUNCS):
raise SystemError("One of the required function to build numpy is not"
" available (the list is %s)." % str(MANDATORY_FUNCS))
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
    # XXX: hack to circumvent cpp pollution from python: python puts its
    # config.h in the public namespace, so we have a clash for the common
    # functions we test. We remove every function tested by python's
    # autoconf, hoping their own tests are correct
for f in OPTIONAL_STDFUNCS_MAYBE:
if config.check_decl(fname2def(f),
headers=["Python.h", "math.h"]):
OPTIONAL_STDFUNCS.remove(f)
check_funcs(OPTIONAL_STDFUNCS)
for h in OPTIONAL_HEADERS:
if config.check_func("", decl=False, call=False, headers=[h]):
moredefs.append((fname2def(h).replace(".", "_"), 1))
for tup in OPTIONAL_INTRINSICS:
headers = None
if len(tup) == 2:
f, args = tup
else:
f, args, headers = tup[0], tup[1], [tup[2]]
if config.check_func(f, decl=False, call=True, call_args=args,
headers=headers):
moredefs.append((fname2def(f), 1))
for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
if config.check_gcc_function_attribute(dec, fn):
moredefs.append((fname2def(fn), 1))
for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
if config.check_gcc_variable_attribute(fn):
m = fn.replace("(", "_").replace(")", "_")
moredefs.append((fname2def(m), 1))
# C99 functions: float and long double versions
check_funcs(C99_FUNCS_SINGLE)
check_funcs(C99_FUNCS_EXTENDED)
def check_complex(config, mathlibs):
priv = []
pub = []
try:
if os.uname()[0] == "Interix":
warnings.warn("Disabling broken complex support. See #1365")
return priv, pub
except:
# os.uname not available on all platforms. blanket except ugly but safe
pass
# Check for complex support
st = config.check_header('complex.h')
if st:
priv.append(('HAVE_COMPLEX_H', 1))
pub.append(('NPY_USE_C99_COMPLEX', 1))
for t in C99_COMPLEX_TYPES:
st = config.check_type(t, headers=["complex.h"])
if st:
pub.append(('NPY_HAVE_%s' % type2def(t), 1))
def check_prec(prec):
flist = [f + prec for f in C99_COMPLEX_FUNCS]
decl = dict([(f, True) for f in flist])
if not config.check_funcs_once(flist, call=decl, decl=decl,
libraries=mathlibs):
for f in flist:
if config.check_func(f, call=True, decl=True,
libraries=mathlibs):
priv.append((fname2def(f), 1))
else:
priv.extend([(fname2def(f), 1) for f in flist])
check_prec('')
check_prec('f')
check_prec('l')
return priv, pub
def check_ieee_macros(config):
priv = []
pub = []
macros = []
def _add_decl(f):
priv.append(fname2def("decl_%s" % f))
pub.append('NPY_%s' % fname2def("decl_%s" % f))
    # XXX: hack to circumvent cpp pollution from python: python puts its
    # config.h in the public namespace, so we have a clash for the common
    # functions we test. We remove every function tested by python's
    # autoconf, hoping their own tests are correct
_macros = ["isnan", "isinf", "signbit", "isfinite"]
for f in _macros:
py_symbol = fname2def("decl_%s" % f)
already_declared = config.check_decl(py_symbol,
headers=["Python.h", "math.h"])
if already_declared:
if config.check_macro_true(py_symbol,
headers=["Python.h", "math.h"]):
pub.append('NPY_%s' % fname2def("decl_%s" % f))
else:
macros.append(f)
    # Normally, isnan and isinf are macros (C99), but some platforms only have
    # the function, or both the function and the macro version. Check for the
    # macro only, and define replacement ones if not found.
# Note: including Python.h is necessary because it modifies some math.h
# definitions
for f in macros:
st = config.check_decl(f, headers=["Python.h", "math.h"])
if st:
_add_decl(f)
return priv, pub
def check_types(config_cmd, ext, build_dir):
private_defines = []
public_defines = []
# Expected size (in number of bytes) for each type. This is an
# optimization: those are only hints, and an exhaustive search for the size
# is done if the hints are wrong.
expected = {}
expected['short'] = [2]
expected['int'] = [4]
expected['long'] = [8, 4]
expected['float'] = [4]
expected['double'] = [8]
expected['long double'] = [16, 12, 8]
expected['Py_intptr_t'] = [8, 4]
expected['PY_LONG_LONG'] = [8]
expected['long long'] = [8]
expected['off_t'] = [8, 4]
# Check we have the python header (-dev* packages on Linux)
result = config_cmd.check_header('Python.h')
if not result:
raise SystemError(
"Cannot compile 'Python.h'. Perhaps you need to "
"install python-dev|python-devel.")
res = config_cmd.check_header("endian.h")
if res:
private_defines.append(('HAVE_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
# Check basic types sizes
for type in ('short', 'int', 'long'):
res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])
if res:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
else:
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
for type in ('float', 'double', 'long double'):
already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
headers=["Python.h"])
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
if not already_declared and not type == 'long double':
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# Compute size of corresponding complex type: used to check that our
# definition is binary compatible with C99 complex type (check done at
# build time in npy_common.h)
complex_def = "struct {%s __x; %s __y;}" % (type, type)
res = config_cmd.check_type_size(complex_def,
expected=[2 * x for x in expected[type]])
if res >= 0:
public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % complex_def)
for type in ('Py_intptr_t', 'off_t'):
res = config_cmd.check_type_size(type, headers=["Python.h"],
library_dirs=[pythonlib_dir()],
expected=expected[type])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# We check declaration AND type because that's how distutils does it.
if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
library_dirs=[pythonlib_dir()],
expected=expected['PY_LONG_LONG'])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
res = config_cmd.check_type_size('long long',
expected=expected['long long'])
if res >= 0:
#private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'long long')
if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
raise RuntimeError(
"Config wo CHAR_BIT is not supported"
", please contact the maintainers")
return private_defines, public_defines
def check_mathlib(config_cmd):
# Testing the C math library
mathlibs = []
mathlibs_choices = [[], ['m'], ['cpml']]
mathlib = os.environ.get('MATHLIB')
if mathlib:
mathlibs_choices.insert(0, mathlib.split(','))
for libs in mathlibs_choices:
if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
mathlibs = libs
break
else:
raise EnvironmentError("math library missing; rerun "
"setup.py after setting the "
"MATHLIB env variable")
return mathlibs
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
if config.check_compiler_gcc4():
return '__attribute__((visibility("hidden")))'
else:
return ''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, dot_join
from numpy.distutils.system_info import get_info
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
codegen_dir = join(local_dir, 'code_generators')
if is_released(config):
warnings.simplefilter('error', MismatchCAPIWarning)
# Check whether we have a mismatch between the set C API VERSION and the
# actual C API VERSION
check_api_version(C_API_VERSION, codegen_dir)
generate_umath_py = join(codegen_dir, 'generate_umath.py')
n = dot_join(config.name, 'generate_umath')
generate_umath = imp.load_module('_'.join(n.split('.')),
open(generate_umath_py, 'U'), generate_umath_py,
('.py', 'U', 1))
header_dir = 'include/numpy' # this is relative to config.path_in_package
cocache = CallOnceOnly()
def generate_config_h(ext, build_dir):
target = join(build_dir, header_dir, 'config.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
# Check math library and C99 math funcs availability
mathlibs = check_mathlib(config_cmd)
moredefs.append(('MATHLIB', ','.join(mathlibs)))
check_math_capabilities(config_cmd, moredefs, mathlibs)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
# Signal check
if is_npy_no_signal():
moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
# Windows checks
if sys.platform == 'win32' or os.name == 'nt':
win32_checks(moredefs)
# C99 restrict keyword
moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))
# Inline check
inline = config_cmd.check_inline()
# Check whether we need our own wide character support
if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
PYTHON_HAS_UNICODE_WIDE = True
else:
PYTHON_HAS_UNICODE_WIDE = False
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Get long double representation
if sys.platform != 'darwin':
rep = check_long_double_representation(config_cmd)
if rep in ['INTEL_EXTENDED_12_BYTES_LE',
'INTEL_EXTENDED_16_BYTES_LE',
'MOTOROLA_EXTENDED_12_BYTES_BE',
'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
else:
raise ValueError("Unrecognized long double format: %s" % rep)
# Py3K check
if sys.version_info[0] == 3:
moredefs.append(('NPY_PY3K', 1))
# Generate the config.h file from moredefs
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# define inline to our keyword, or nothing
target_f.write('#ifndef __cplusplus\n')
if inline == 'inline':
target_f.write('/* #undef inline */\n')
else:
target_f.write('#define inline %s\n' % inline)
target_f.write('#endif\n')
# add the guard to make sure config.h is never included directly,
# but always through npy_config.h
target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")
target_f.close()
print('File:', target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
else:
mathlibs = []
target_f = open(target)
for line in target_f:
s = '#define MATHLIB'
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
target_f.close()
        # Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attribute (and none is
        # needed).
if hasattr(ext, 'libraries'):
ext.libraries.extend(mathlibs)
incl_dir = os.path.dirname(target)
if incl_dir not in config.numpy_include_dirs:
config.numpy_include_dirs.append(incl_dir)
return target
def generate_numpyconfig_h(ext, build_dir):
"""Depends on config.h: generate_config_h has to be called before !"""
# put private include directory in build_dir on search path
        # allows using code generation in headers
config.add_include_dirs(join(build_dir, "src", "private"))
target = join(build_dir, header_dir, '_numpyconfig.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
if is_npy_no_signal():
moredefs.append(('NPY_NO_SIGNAL', 1))
if is_npy_no_smp():
moredefs.append(('NPY_NO_SMP', 1))
else:
moredefs.append(('NPY_NO_SMP', 0))
mathlibs = check_mathlib(config_cmd)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
            # Check whether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
# visibility check
hidden_visibility = visibility_define(config_cmd)
moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
# Add the C API/ABI versions
moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
# Add moredefs to header
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# Define __STDC_FORMAT_MACROS
target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
target_f.close()
# Dump the numpyconfig.h header to stdout
print('File: %s' % target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
config.add_data_files((header_dir, target))
return target
def generate_api_func(module_name):
def generate_api(ext, build_dir):
script = join(codegen_dir, module_name + '.py')
sys.path.insert(0, codegen_dir)
try:
m = __import__(module_name)
log.info('executing %s', script)
h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
finally:
del sys.path[0]
config.add_data_files((header_dir, h_file),
(header_dir, doc_file))
return (h_file,)
return generate_api
generate_numpy_api = generate_api_func('generate_numpy_api')
generate_ufunc_api = generate_api_func('generate_ufunc_api')
config.add_include_dirs(join(local_dir, "src", "private"))
config.add_include_dirs(join(local_dir, "src"))
config.add_include_dirs(join(local_dir))
config.add_data_files('include/numpy/*.h')
config.add_include_dirs(join('src', 'npymath'))
config.add_include_dirs(join('src', 'multiarray'))
config.add_include_dirs(join('src', 'umath'))
config.add_include_dirs(join('src', 'npysort'))
config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
config.numpy_include_dirs.extend(config.paths('include'))
deps = [join('src', 'npymath', '_signbit.c'),
join('include', 'numpy', '*object.h'),
join(codegen_dir, 'genapi.py'),
]
#######################################################################
# dummy module #
#######################################################################
    # npymath needs the config.h and numpyconfig.h files to be generated, but
    # build_clib cannot handle generate_config_h and generate_numpyconfig_h
    # (don't ask). Because clibs are generated before extensions, we have to
    # explicitly add an extension which has generate_config_h and
    # generate_numpyconfig_h as sources *before* adding npymath.
config.add_extension('_dummy',
sources=[join('src', 'dummymodule.c'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api]
)
#######################################################################
# npymath library #
#######################################################################
subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
        # update the substitution dictionary during npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
# (avoid late errors with MATHLIB which are confusing if the
# compiler does not work).
st = config_cmd.try_link('int main(void) { return 0;}')
if not st:
raise RuntimeError("Broken toolchain: cannot link a simple C program")
mlibs = check_mathlib(config_cmd)
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
subst_dict["posix_mathlib"] = posix_mlib
subst_dict["msvc_mathlib"] = msvc_mlib
npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
join('src', 'npymath', 'ieee754.c.src'),
join('src', 'npymath', 'npy_math_complex.c.src'),
join('src', 'npymath', 'halffloat.c')
]
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib')
config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
subst_dict)
config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
subst_dict)
#######################################################################
# npysort library #
#######################################################################
# This library is created for the build but it is not installed
npysort_sources = [join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
join('src', 'private', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
join('src', 'private', 'npy_binsearch.h.src'),
join('src', 'npysort', 'binsearch.c.src'),
]
config.add_library('npysort',
sources=npysort_sources,
include_dirs=[])
#######################################################################
# multiarray module #
#######################################################################
# Multiarray version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such not in the src
# argument of build_ext command
def generate_multiarray_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'multiarray')
sources = [join(local_dir, subpath, 'scalartypes.c.src'),
join(local_dir, subpath, 'arraytypes.c.src'),
join(local_dir, subpath, 'nditer_templ.c.src'),
join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
join(local_dir, subpath, 'einsum.c.src'),
join(local_dir, 'src', 'private', 'templ_common.h.src')
]
        # numpy.distutils generates .c from .c.src in weird directories; we have
        # to add them there as they depend on the build_dir
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
join('src', 'multiarray', 'array_assign.h'),
join('src', 'multiarray', 'buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
join('src', 'multiarray', 'convert.h'),
join('src', 'multiarray', 'conversion_utils.h'),
join('src', 'multiarray', 'ctors.h'),
join('src', 'multiarray', 'descriptor.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
join('src', 'multiarray', 'mapping.h'),
join('src', 'multiarray', 'methods.h'),
join('src', 'multiarray', 'multiarraymodule.h'),
join('src', 'multiarray', 'nditer_impl.h'),
join('src', 'multiarray', 'numpymemoryview.h'),
join('src', 'multiarray', 'number.h'),
join('src', 'multiarray', 'numpyos.h'),
join('src', 'multiarray', 'refcount.h'),
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'multiarray', 'vdot.h'),
join('src', 'private', 'npy_config.h'),
join('src', 'private', 'templ_common.h.src'),
join('src', 'private', 'lowlevel_strided_loops.h'),
join('include', 'numpy', 'arrayobject.h'),
join('include', 'numpy', '_neighborhood_iterator_imp.h'),
join('include', 'numpy', 'npy_endian.h'),
join('include', 'numpy', 'arrayscalars.h'),
join('include', 'numpy', 'noprefix.h'),
join('include', 'numpy', 'npy_interrupt.h'),
join('include', 'numpy', 'npy_3kcompat.h'),
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('include', 'numpy', 'npy_common.h'),
join('include', 'numpy', 'npy_os.h'),
join('include', 'numpy', 'utils.h'),
join('include', 'numpy', 'ndarrayobject.h'),
join('include', 'numpy', 'npy_cpu.h'),
join('include', 'numpy', 'numpyconfig.h'),
join('include', 'numpy', 'ndarraytypes.h'),
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
join('include', 'numpy', '_numpyconfig.h.in'),
            # add library sources as distutils does not consider library
            # dependencies
] + npysort_sources + npymath_sources
multiarray_src = [
join('src', 'multiarray', 'alloc.c'),
join('src', 'multiarray', 'arrayobject.c'),
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'array_assign.c'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'compiled_base.c'),
join('src', 'multiarray', 'common.c'),
join('src', 'multiarray', 'convert.c'),
join('src', 'multiarray', 'convert_datatype.c'),
join('src', 'multiarray', 'conversion_utils.c'),
join('src', 'multiarray', 'ctors.c'),
join('src', 'multiarray', 'datetime.c'),
join('src', 'multiarray', 'datetime_strings.c'),
join('src', 'multiarray', 'datetime_busday.c'),
join('src', 'multiarray', 'datetime_busdaycal.c'),
join('src', 'multiarray', 'descriptor.c'),
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
join('src', 'multiarray', 'item_selection.c'),
join('src', 'multiarray', 'iterators.c'),
join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
join('src', 'multiarray', 'mapping.c'),
join('src', 'multiarray', 'methods.c'),
join('src', 'multiarray', 'multiarraymodule.c'),
join('src', 'multiarray', 'nditer_templ.c.src'),
join('src', 'multiarray', 'nditer_api.c'),
join('src', 'multiarray', 'nditer_constr.c'),
join('src', 'multiarray', 'nditer_pywrap.c'),
join('src', 'multiarray', 'number.c'),
join('src', 'multiarray', 'numpymemoryview.c'),
join('src', 'multiarray', 'numpyos.c'),
join('src', 'multiarray', 'refcount.c'),
join('src', 'multiarray', 'sequence.c'),
join('src', 'multiarray', 'shape.c'),
join('src', 'multiarray', 'scalarapi.c'),
join('src', 'multiarray', 'scalartypes.c.src'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'ucsnarrow.c'),
join('src', 'multiarray', 'vdot.c'),
join('src', 'private', 'templ_common.h.src'),
]
blas_info = get_info('blas_opt', 0)
if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
extra_info = blas_info
multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'),
join('src', 'multiarray', 'python_xerbla.c'),
])
if uses_accelerate_framework(blas_info):
multiarray_src.extend(get_sgemv_fix())
else:
extra_info = {}
if not ENABLE_SEPARATE_COMPILATION:
multiarray_deps.extend(multiarray_src)
multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
multiarray_src.append(generate_multiarray_templated_sources)
config.add_extension('multiarray',
sources=multiarray_src +
[generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
join(codegen_dir, 'generate_numpy_api.py'),
join('*.py')],
depends=deps + multiarray_deps,
libraries=['npymath', 'npysort'],
extra_info=extra_info)
#######################################################################
# umath module #
#######################################################################
# umath version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such not in the src
# argument of build_ext command
def generate_umath_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'umath')
sources = [
join(local_dir, subpath, 'loops.h.src'),
join(local_dir, subpath, 'loops.c.src'),
join(local_dir, subpath, 'scalarmath.c.src'),
join(local_dir, subpath, 'simd.inc.src')]
        # numpy.distutils generates .c from .c.src in weird directories; we have
        # to add them there as they depend on the build_dir
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
def generate_umath_c(ext, build_dir):
target = join(build_dir, header_dir, '__umath_generated.c')
dir = os.path.dirname(target)
if not os.path.exists(dir):
os.makedirs(dir)
script = generate_umath_py
if newer(script, target):
f = open(target, 'w')
f.write(generate_umath.make_code(generate_umath.defdict,
generate_umath.__file__))
f.close()
return []
umath_src = [
join('src', 'umath', 'umathmodule.c'),
join('src', 'umath', 'reduction.c'),
join('src', 'umath', 'funcs.inc.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'scalarmath.c.src'),
join('src', 'umath', 'ufunc_type_resolution.c')]
umath_deps = [
generate_umath_py,
join('src', 'multiarray', 'common.h'),
join('src', 'private', 'templ_common.h.src'),
join('src', 'umath', 'simd.inc.src'),
join(codegen_dir, 'generate_ufunc_api.py'),
join('src', 'private', 'ufunc_override.h')] + npymath_sources
if not ENABLE_SEPARATE_COMPILATION:
umath_deps.extend(umath_src)
umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
umath_src.append(generate_umath_templated_sources)
umath_src.append(join('src', 'umath', 'funcs.inc.src'))
umath_src.append(join('src', 'umath', 'simd.inc.src'))
config.add_extension('umath',
sources=umath_src +
[generate_config_h,
generate_numpyconfig_h,
generate_umath_c,
generate_ufunc_api],
depends=deps + umath_deps,
libraries=['npymath'],
)
#######################################################################
# umath_tests module #
#######################################################################
config.add_extension('umath_tests',
sources=[join('src', 'umath', 'umath_tests.c.src')])
#######################################################################
# custom rational dtype module #
#######################################################################
config.add_extension('test_rational',
sources=[join('src', 'umath', 'test_rational.c.src')])
#######################################################################
# struct_ufunc_test module #
#######################################################################
config.add_extension('struct_ufunc_test',
sources=[join('src', 'umath', 'struct_ufunc_test.c.src')])
#######################################################################
# multiarray_tests module #
#######################################################################
config.add_extension('multiarray_tests',
sources=[join('src', 'multiarray', 'multiarray_tests.c.src')])
#######################################################################
# operand_flag_tests module #
#######################################################################
config.add_extension('operand_flag_tests',
sources=[join('src', 'umath', 'operand_flag_tests.c.src')])
config.add_data_dir('tests')
config.add_data_dir('tests/data')
config.make_svn_version_py()
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| bsd-3-clause | 5,261,013,120,446,992,000 | 41.188968 | 97 | 0.530058 | false |
simone-campagna/daikon | zirkon/toolbox/dictutils.py | 2 | 3770 | # -*- coding: utf-8 -*-
#
# Copyright 2013 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Utility function for dictionaries.
"""
import collections
__author__ = "Simone Campagna"
__copyright__ = 'Copyright (c) 2015 Simone Campagna'
__license__ = 'Apache License Version 2.0'
__all__ = [
'as_dict',
'compare_dicts',
'transform',
]
def as_dict(dct, *, depth=-1, dict_class=dict):
"""Returns a dict with the same content of the dct mapping.
Parameters
----------
dct: |Mapping|
a dict-like object (dict, OrderedDict, Section, ...)
depth: int, optional
the depth of the copy (< 0 means full copy)
dict_class: type, optional
the dict class to be used for the copy
Returns
-------
dict_class
the converted dict
"""
stddct = dict_class(dct)
dcts = [stddct]
while depth != 0 and dcts:
next_dcts = []
for dct in dcts:
for key, value in dct.items():
if isinstance(value, collections.Mapping):
dct_value = dict_class(value)
dct[key] = dct_value
next_dcts.append(dct_value)
dcts = next_dcts
if depth > 0:
depth -= 1
return stddct
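# Illustrative sketch (not part of the original module): a small demo of
# ``as_dict`` based on the documented behaviour above.  The helper name is
# made up for illustration and is never called at import time.
def _demo_as_dict():
    nested = collections.OrderedDict(
        [("a", collections.OrderedDict([("b", 1)])), ("c", 2)])
    plain = as_dict(nested, depth=-1, dict_class=dict)
    assert plain == {"a": {"b": 1}, "c": 2}
    assert type(plain["a"]) is dict  # nested mappings are converted too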
def compare_dicts(dct0, dct1):
"""Compare two dictionaries. Converts the two dictionaries to standard dicts
before. Used to avoid differences due to key ordering.
Parameters
----------
dct0: |Mapping|
a dict-like object (dict, OrderedDict, Section, ...)
dct1: |Mapping|
a dict-like object (dict, OrderedDict, Section, ...)
Returns
-------
bool
True if the two dicts have the same content
"""
stddct0 = as_dict(dct0, depth=-1, dict_class=dict)
stddct1 = as_dict(dct1, depth=-1, dict_class=dict)
return stddct0 == stddct1
def transform(dct, *, key_transform=None, value_transform=None, dict_class=None):
"""Transforms a dict by applying functions to keys and values.
Parameters
----------
dct: |Mapping|
a dict-like object (dict, OrderedDict, Section, ...)
key_transform: callable, optional
a function to transform keys
value_transform: callable, optional
a function to transform values
dict_class: type, optional
the dict class to be used for the copy
Returns
-------
dict_class
the converted dict
"""
if key_transform is None:
key_transform = lambda key: key # flake8: noqa
if value_transform is None:
value_transform = lambda value: value # flake8: noqa
if dict_class is None:
use_dict_class = type(dct)
else:
use_dict_class = dict_class
resdct = use_dict_class()
for key, value in dct.items():
key = key_transform(key)
if isinstance(value, collections.Mapping):
resdct[key] = transform(
value,
key_transform=key_transform,
value_transform=value_transform,
dict_class=dict_class)
else:
resdct[key] = value_transform(value)
return resdct
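# Illustrative sketch (not part of the original module): upper-casing keys and
# stringifying leaf values with ``transform``, assuming the documented
# behaviour above.  The helper name is made up and never called at import time.
def _demo_transform():
    src = {"outer": {"inner": 1}, "flag": True}
    res = transform(
        src,
        key_transform=lambda key: key.upper(),
        value_transform=str,
        dict_class=dict)
    assert res == {"OUTER": {"INNER": "1"}, "FLAG": "True"}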
| apache-2.0 | -7,304,431,097,113,996,000 | 28.685039 | 81 | 0.593103 | false |
lcostantino/healing-os | healing/api/app.py | 1 | 2032 | # -*- coding: utf-8 -*-
#
# Copyright 2014 - Intel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pecan
from oslo.config import cfg
from healing.api import access_control
from healing import config
from healing.api import hooks
from healing.api import middleware
from healing.db import api as db_api
from healing.handler_manager import get_plugin_handler
def get_pecan_config():
# Set up the pecan configuration.
opts = cfg.CONF.pecan
cfg_dict = {
"app": {
"root": opts.root,
"modules": opts.modules,
"debug": opts.debug,
"auth_enable": opts.auth_enable
}
}
return pecan.configuration.conf_from_dict(cfg_dict)
def setup_app(pecan_config=None, transport=None):
if not pecan_config:
pecan_config = get_pecan_config()
    # TODO: pass db hook?
app_hooks = [hooks.ConfigHook(),
hooks.TranslationHook(),
hooks.CustomErrorHook(),
]
#if config.CONF.pecan.auth_enable:
app_hooks.append(access_control.DelayedAuthHook())
app_conf = dict(pecan_config.app)
db_api.setup_db()
app = pecan.make_app(
app_conf.pop('root'),
hooks=app_hooks,
logging=getattr(config, 'logging', {}),
wrap_app=middleware.ParsableErrorMiddleware,
guess_content_type_from_ext=False,
**app_conf
)
# Set up access control.
app = access_control.setup(app)
get_plugin_handler()
return app
| apache-2.0 | -5,739,785,142,211,357,000 | 27.222222 | 77 | 0.649606 | false |
xiaom-GitHub/openthread | tests/scripts/thread-cert/Cert_6_5_01_ChildResetSynchronize.py | 4 | 3434 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
LEADER = 1
ED = 2
class Cert_6_5_1_ChildResetSynchronize(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,3):
self.nodes[i] = node.Node(i, (i == ED))
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ED].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ED].set_panid(0xface)
self.nodes[ED].set_mode('rsn')
self.nodes[ED].set_timeout(3)
self._setUpEd()
def _setUpEd(self):
self.nodes[ED].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED].enable_whitelist()
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[ED].start()
time.sleep(5)
self.assertEqual(self.nodes[ED].get_state(), 'child')
self.nodes[ED].reset()
self._setUpEd()
time.sleep(5)
self.nodes[ED].set_timeout(100)
self.nodes[ED].start()
time.sleep(5)
self.assertEqual(self.nodes[ED].get_state(), 'child')
self.nodes[ED].reset()
self._setUpEd()
time.sleep(5)
self.nodes[ED].set_timeout(100)
self.nodes[ED].start()
time.sleep(5)
self.assertEqual(self.nodes[ED].get_state(), 'child')
addrs = self.nodes[ED].get_addrs()
for addr in addrs:
if addr[0:4] == 'fe80':
self.assertTrue(self.nodes[LEADER].ping(addr))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -3,673,567,928,461,065,000 | 35.147368 | 78 | 0.662784 | false |
onitake/ansible | lib/ansible/modules/network/f5/bigip_log_publisher.py | 5 | 12922 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_log_publisher
short_description: Manages log publishers on a BIG-IP
description:
- Manages log publishers on a BIG-IP.
version_added: 2.6
options:
name:
description:
- Specifies the name of the log publisher.
required: True
description:
description:
- Specifies a description for the log publisher.
destinations:
description:
- Specifies log destinations for this log publisher to use.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the resource exists.
- When C(absent), ensures the resource is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a log publisher for use in high-speed logging
bigip_log_publisher:
name: publisher1
destinations:
- hsl1
- security-log-servers-logging
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the resource.
returned: changed
type: string
sample: "Security log publisher"
destinations:
description: The new list of destinations for the resource.
returned: changed
type: list
sample: ['/Common/destination1', '/Common/destination2']
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.compare import cmp_simple_list
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.compare import cmp_simple_list
class Parameters(AnsibleF5Parameters):
api_map = {
}
api_attributes = [
'destinations',
'description',
]
returnables = [
'destinations',
'description',
]
updatables = [
'destinations',
'description',
]
class ApiParameters(Parameters):
@property
def destinations(self):
if self._values['destinations'] is None:
return None
results = []
for destination in self._values['destinations']:
result = fq_name(destination['partition'], destination['name'])
results.append(result)
results.sort()
return results
class ModuleParameters(Parameters):
@property
def destinations(self):
if self._values['destinations'] is None:
return None
if len(self._values['destinations']) == 1 and self._values['destinations'][0] == '':
return ''
result = [fq_name(self.partition, x) for x in self._values['destinations']]
result = list(set(result))
result.sort()
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def destinations(self):
result = cmp_simple_list(self.want.destinations, self.have.destinations)
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/sys/log-config/publisher/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/sys/log-config/publisher/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/log-config/publisher/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/sys/log-config/publisher/".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return response['selfLink']
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/sys/log-config/publisher/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
response = self.client.api.delete(uri)
if response.status == 200:
return True
raise F5ModuleError(response.content)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
description=dict(),
destinations=dict(type='list'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
| gpl-3.0 | -7,343,084,928,863,469,000 | 29.121212 | 92 | 0.602925 | false |
openstack/cinder | cinder/api/schemas/groups.py | 3 | 4956 | # Copyright (C) 2018 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Schema for V3 Generic Volume Groups API.
"""
from cinder.api.validation import parameter_types
create = {
'type': 'object',
'properties': {
'group': {
'type': 'object',
'properties': {
'description': parameter_types.description,
'group_type': {
'type': 'string', 'format': 'group_type'
},
'name': parameter_types.name_allow_zero_min_length,
'volume_types': {
'type': 'array', 'minItems': 1,
'items': {
'type': 'string', 'maxLength': 255,
},
'uniqueItems': True
},
'availability_zone': {
'type': ['string', 'null'], 'format': 'availability_zone'
},
},
'required': ['group_type', 'volume_types'],
'additionalProperties': False,
},
},
'required': ['group'],
'additionalProperties': False,
}
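# Illustrative sketch (not part of the original module): checking a sample
# request body against the ``create`` schema with the ``jsonschema`` package.
# Cinder normally applies these schemas through its own validation decorators;
# calling jsonschema directly is only for illustration, and the custom formats
# ('group_type', 'availability_zone') are ignored without a format checker.
# All names in the body below are made up.
def _demo_validate_create():
    import jsonschema
    body = {
        'group': {
            'name': 'my-group',
            'group_type': 'my-group-type',
            'volume_types': ['my-volume-type'],
        },
    }
    jsonschema.validate(body, create)  # raises ValidationError if invalid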
create_from_source = {
'type': 'object',
'properties': {
'create-from-src': {
'type': 'object',
'properties': {
'description': parameter_types.description,
'name': parameter_types.name_allow_zero_min_length,
'source_group_id': parameter_types.uuid,
'group_snapshot_id': parameter_types.uuid,
},
'oneOf': [
{'required': ['group_snapshot_id']},
{'required': ['source_group_id']}
],
'additionalProperties': False,
},
},
'required': ['create-from-src'],
'additionalProperties': False,
}
delete = {
'type': 'object',
'properties': {
'delete': {
'type': 'object',
'properties': {
'delete-volumes': parameter_types.boolean,
},
'additionalProperties': False,
},
},
'required': ['delete'],
'additionalProperties': False,
}
reset_status = {
'type': 'object',
'properties': {
'reset_status': {
'type': 'object',
'properties': {
'status': {
'type': 'string', 'format': 'group_status'
},
},
'required': ['status'],
'additionalProperties': False,
},
},
'required': ['reset_status'],
'additionalProperties': False,
}
update = {
'type': 'object',
'properties': {
'group': {
'type': 'object',
'properties': {
'description': parameter_types.description,
'name': parameter_types.name_allow_zero_min_length,
'add_volumes': parameter_types.description,
'remove_volumes': parameter_types.description,
},
'anyOf': [
{'required': ['name']},
{'required': ['description']},
{'required': ['add_volumes']},
{'required': ['remove_volumes']},
],
'additionalProperties': False,
},
},
'required': ['group'],
'additionalProperties': False,
}
failover_replication = {
'type': 'object',
'properties': {
'failover_replication': {
'type': 'object',
'properties': {
'allow_attached_volume': parameter_types.boolean,
'secondary_backend_id': parameter_types.nullable_string,
},
'additionalProperties': False,
},
},
'required': ['failover_replication'],
'additionalProperties': False,
}
list_replication = {
'type': 'object',
'properties': {
'list_replication_targets': {'type': 'object'}
},
'required': ['list_replication_targets'],
'additionalProperties': False,
}
enable_replication = {
'type': 'object',
'properties': {
'enable_replication': {'type': 'object'}
},
'required': ['enable_replication'],
'additionalProperties': False,
}
disable_replication = {
'type': 'object',
'properties': {
'disable_replication': {'type': 'object'}
},
'required': ['disable_replication'],
'additionalProperties': False,
}
| apache-2.0 | -7,871,887,999,568,972,000 | 27.647399 | 78 | 0.503228 | false |
indevgr/django | tests/forms_tests/field_tests/test_integerfield.py | 4 | 5979 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import IntegerField, Textarea, ValidationError
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class IntegerFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
def test_integerfield_1(self):
f = IntegerField()
self.assertWidgetRendersTo(f, '<input type="number" name="f" id="id_f" />')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean('')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean('a')
self.assertEqual(42, f.clean(42))
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean(3.14)
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean('1a')
self.assertIsNone(f.max_value)
self.assertIsNone(f.min_value)
def test_integerfield_2(self):
f = IntegerField(required=False)
self.assertIsNone(f.clean(''))
self.assertEqual('None', repr(f.clean('')))
self.assertIsNone(f.clean(None))
self.assertEqual('None', repr(f.clean(None)))
self.assertEqual(1, f.clean('1'))
self.assertIsInstance(f.clean('1'), int)
self.assertEqual(23, f.clean('23'))
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean('a')
self.assertEqual(1, f.clean('1 '))
self.assertEqual(1, f.clean(' 1'))
self.assertEqual(1, f.clean(' 1 '))
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean('1a')
self.assertIsNone(f.max_value)
self.assertIsNone(f.min_value)
def test_integerfield_3(self):
f = IntegerField(max_value=10)
self.assertWidgetRendersTo(f, '<input max="10" type="number" name="f" id="id_f" />')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
self.assertEqual(1, f.clean(1))
self.assertEqual(10, f.clean(10))
with self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'"):
f.clean(11)
self.assertEqual(10, f.clean('10'))
with self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 10.'"):
f.clean('11')
self.assertEqual(f.max_value, 10)
self.assertIsNone(f.min_value)
def test_integerfield_4(self):
f = IntegerField(min_value=10)
self.assertWidgetRendersTo(f, '<input id="id_f" type="number" name="f" min="10" />')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'"):
f.clean(1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertIsNone(f.max_value)
self.assertEqual(f.min_value, 10)
def test_integerfield_5(self):
f = IntegerField(min_value=10, max_value=20)
self.assertWidgetRendersTo(f, '<input id="id_f" max="20" type="number" name="f" min="10" />')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
with self.assertRaisesMessage(ValidationError, "'Ensure this value is greater than or equal to 10.'"):
f.clean(1)
self.assertEqual(10, f.clean(10))
self.assertEqual(11, f.clean(11))
self.assertEqual(10, f.clean('10'))
self.assertEqual(11, f.clean('11'))
self.assertEqual(20, f.clean(20))
with self.assertRaisesMessage(ValidationError, "'Ensure this value is less than or equal to 20.'"):
f.clean(21)
self.assertEqual(f.max_value, 20)
self.assertEqual(f.min_value, 10)
def test_integerfield_localized(self):
"""
A localized IntegerField's widget renders to a text input without any
number input specific attributes.
"""
f1 = IntegerField(localize=True)
self.assertWidgetRendersTo(f1, '<input id="id_f" name="f" type="text" />')
def test_integerfield_float(self):
f = IntegerField()
self.assertEqual(1, f.clean(1.0))
self.assertEqual(1, f.clean('1.0'))
self.assertEqual(1, f.clean(' 1.0 '))
self.assertEqual(1, f.clean('1.'))
self.assertEqual(1, f.clean(' 1. '))
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean('1.5')
with self.assertRaisesMessage(ValidationError, "'Enter a whole number.'"):
f.clean('…')
def test_integerfield_big_num(self):
f = IntegerField()
self.assertEqual(9223372036854775808, f.clean(9223372036854775808))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808'))
self.assertEqual(9223372036854775808, f.clean('9223372036854775808.0'))
def test_integerfield_subclass(self):
"""
Class-defined widget is not overwritten by __init__() (#22245).
"""
class MyIntegerField(IntegerField):
widget = Textarea
f = MyIntegerField()
self.assertEqual(f.widget.__class__, Textarea)
f = MyIntegerField(localize=True)
self.assertEqual(f.widget.__class__, Textarea)
| bsd-3-clause | -4,634,796,334,554,950,000 | 42.948529 | 110 | 0.623222 | false |
jonathan-beard/edx-platform | cms/djangoapps/contentstore/features/component.py | 14 | 6644 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
# Lettuce formats proposed definitions for unimplemented steps with the
# argument name "step" instead of "_step" and pylint does not like that.
# pylint: disable=unused-argument
from lettuce import world, step
from nose.tools import assert_true, assert_in, assert_equal # pylint: disable=no-name-in-module
DISPLAY_NAME = "Display Name"
@step(u'I add this type of single step component:$')
def add_a_single_step_component(step):
for step_hash in step.hashes:
component = step_hash['Component']
assert_in(component, ['Discussion', 'Video'])
world.create_component_instance(
step=step,
category='{}'.format(component.lower()),
)
@step(u'I see this type of single step component:$')
def see_a_single_step_component(step):
for step_hash in step.hashes:
component = step_hash['Component']
assert_in(component, ['Discussion', 'Video'])
component_css = 'div.xmodule_{}Module'.format(component)
assert_true(world.is_css_present(component_css),
"{} couldn't be found".format(component))
@step(u'I add this type of( Advanced)? (HTML|Problem) component:$')
def add_a_multi_step_component(step, is_advanced, category):
for step_hash in step.hashes:
world.create_component_instance(
step=step,
category='{}'.format(category.lower()),
component_type=step_hash['Component'],
is_advanced=bool(is_advanced),
)
@step(u'I see (HTML|Problem) components in this order:')
def see_a_multi_step_component(step, category):
# Wait for all components to finish rendering
selector = 'li.studio-xblock-wrapper div.xblock-student_view'
world.wait_for(lambda _: len(world.css_find(selector)) == len(step.hashes))
for idx, step_hash in enumerate(step.hashes):
if category == 'HTML':
html_matcher = {
'Text': '\n \n',
'Announcement': '<h3>Announcement Date</h3>',
'Zooming Image Tool': '<h2>Zooming Image Tool</h2>',
'E-text Written in LaTeX': '<h3>Example: E-text page</h3>',
'Raw HTML': '<p>This template is similar to the Text template. The only difference is',
}
actual_html = world.css_html(selector, index=idx)
assert_in(html_matcher[step_hash['Component']].strip(), actual_html.strip())
else:
actual_text = world.css_text(selector, index=idx)
assert_in(step_hash['Component'].upper(), actual_text)
@step(u'I see a "([^"]*)" Problem component$')
def see_a_problem_component(step, category):
component_css = 'div.xmodule_CapaModule'
assert_true(world.is_css_present(component_css),
'No problem was added to the unit.')
problem_css = 'li.studio-xblock-wrapper div.xblock-student_view'
# This view presents the given problem component in uppercase. Assert that the text matches
# the component selected (in uppercase)
assert_true(world.css_contains_text(problem_css, category.upper()))
@step(u'I add a "([^"]*)" "([^"]*)" component$')
def add_component_category(step, component, category):
assert category in ('single step', 'HTML', 'Problem', 'Advanced Problem')
given_string = 'I add this type of {} component:'.format(category)
step.given('{}\n{}\n{}'.format(given_string, '|Component|', '|{}|'.format(component)))
@step(u'I delete all components$')
def delete_all_components(step):
count = len(world.css_find('ol.reorderable-container li.studio-xblock-wrapper'))
step.given('I delete "' + str(count) + '" component')
@step(u'I delete "([^"]*)" component$')
def delete_components(step, number):
world.wait_for_xmodule()
delete_btn_css = 'a.delete-button'
prompt_css = 'div#prompt-warning'
btn_css = '{} a.button.action-primary'.format(prompt_css)
saving_mini_css = 'div#page-notification .wrapper-notification-mini'
for _ in range(int(number)):
world.css_click(delete_btn_css)
assert_true(
world.is_css_present('{}.is-shown'.format(prompt_css)),
msg='Waiting for the confirmation prompt to be shown')
# Pressing the button via css was not working reliably for the last component
# when run in Chrome.
        if world.browser.driver_name == 'Chrome':
world.browser.execute_script("$('{}').click()".format(btn_css))
else:
world.css_click(btn_css)
# Wait for the saving notification to pop up then disappear
if world.is_css_present('{}.is-shown'.format(saving_mini_css)):
world.css_find('{}.is-hiding'.format(saving_mini_css))
@step(u'I see no components')
def see_no_components(steps):
assert world.is_css_not_present('li.studio-xblock-wrapper')
@step(u'I delete a component')
def delete_one_component(step):
world.css_click('a.delete-button')
@step(u'I edit and save a component')
def edit_and_save_component(step):
world.css_click('.edit-button')
world.css_click('.save-button')
@step(u'I duplicate the (first|second|third) component$')
def duplicated_component(step, ordinal):
ord_map = {
"first": 0,
"second": 1,
"third": 2,
}
index = ord_map[ordinal]
duplicate_btn_css = 'a.duplicate-button'
world.css_click(duplicate_btn_css, int(index))
@step(u'I see a Problem component with display name "([^"]*)" in position "([^"]*)"$')
def see_component_in_position(step, display_name, index):
component_css = 'div.xmodule_CapaModule'
def find_problem(_driver):
return world.css_text(component_css, int(index)).startswith(display_name.upper())
world.wait_for(find_problem, timeout_msg='Did not find the duplicated problem')
@step(u'I see the display name is "([^"]*)"')
def check_component_display_name(step, display_name):
# The display name for the unit uses the same structure, must differentiate by level-element.
label = world.css_html("section.level-element>header>div>div>span.xblock-display-name")
assert_equal(display_name, label)
@step(u'I change the display name to "([^"]*)"')
def change_display_name(step, display_name):
world.edit_component_and_select_settings()
index = world.get_setting_entry_index(DISPLAY_NAME)
world.set_field_value(index, display_name)
world.save_component()
@step(u'I unset the display name')
def unset_display_name(step):
world.edit_component_and_select_settings()
world.revert_setting_entry(DISPLAY_NAME)
world.save_component()
| agpl-3.0 | 5,621,973,888,502,696,000 | 36.536723 | 103 | 0.653371 | false |
WatanabeYasumasa/edx-platform | common/djangoapps/terrain/stubs/http.py | 19 | 8353 | """
Stub implementation of an HTTP service.
"""
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import urlparse
import threading
import json
from functools import wraps
from lazy import lazy
from logging import getLogger
LOGGER = getLogger(__name__)
def require_params(method, *required_keys):
"""
Decorator to ensure that the method has all the required parameters.
Example:
@require_params('GET', 'id', 'state')
def handle_request(self):
# ....
would send a 400 response if no GET parameters were specified
for 'id' or 'state' (or if those parameters had empty values).
The wrapped function should be a method of a `StubHttpRequestHandler`
subclass.
Currently, "GET" and "POST" are the only supported methods.
"""
def decorator(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
# Read either GET querystring params or POST dict params
if method == "GET":
params = self.get_params
elif method == "POST":
params = self.post_dict
else:
raise ValueError("Unsupported method '{method}'".format(method=method))
# Check for required values
missing = []
for key in required_keys:
if params.get(key) is None:
missing.append(key)
if len(missing) > 0:
msg = "Missing required key(s) {keys}".format(keys=",".join(missing))
self.send_response(400, content=msg, headers={'Content-type': 'text/plain'})
# If nothing is missing, execute the function as usual
else:
return func(self, *args, **kwargs)
return wrapper
return decorator
class StubHttpRequestHandler(BaseHTTPRequestHandler, object):
"""
Handler for the stub HTTP service.
"""
protocol = "HTTP/1.0"
def log_message(self, format_str, *args):
"""
Redirect messages to keep the test console clean.
"""
LOGGER.debug(self._format_msg(format_str, *args))
def log_error(self, format_str, *args):
"""
Helper to log a server error.
"""
LOGGER.error(self._format_msg(format_str, *args))
@lazy
def request_content(self):
"""
Retrieve the content of the request.
"""
try:
length = int(self.headers.getheader('content-length'))
except (TypeError, ValueError):
return ""
else:
return self.rfile.read(length)
@lazy
def post_dict(self):
"""
Retrieve the request POST parameters from the client as a dictionary.
If no POST parameters can be interpreted, return an empty dict.
"""
contents = self.request_content
# The POST dict will contain a list of values for each key.
# None of our parameters are lists, however, so we map [val] --> val
# If the list contains multiple entries, we pick the first one
try:
post_dict = urlparse.parse_qs(contents, keep_blank_values=True)
return {
key: list_val[0]
for key, list_val in post_dict.items()
}
except:
return dict()
@lazy
def get_params(self):
"""
Return the GET parameters (querystring in the URL).
"""
query = urlparse.urlparse(self.path).query
# By default, `parse_qs` returns a list of values for each param
# For convenience, we replace lists of 1 element with just the element
return {
k:v[0] if len(v) == 1 else v
for k,v in urlparse.parse_qs(query).items()
}
@lazy
def path_only(self):
"""
Return the URL path without GET parameters.
Removes the trailing slash if there is one.
"""
path = urlparse.urlparse(self.path).path
if path.endswith('/'):
return path[:-1]
else:
return path
def do_PUT(self):
"""
Allow callers to configure the stub server using the /set_config URL.
The request should have POST data, such that:
Each POST parameter is the configuration key.
Each POST value is a JSON-encoded string value for the configuration.
"""
if self.path == "/set_config" or self.path == "/set_config/":
if len(self.post_dict) > 0:
for key, value in self.post_dict.iteritems():
# Decode the params as UTF-8
try:
key = unicode(key, 'utf-8')
value = unicode(value, 'utf-8')
except UnicodeDecodeError:
self.log_message("Could not decode request params as UTF-8")
self.log_message(u"Set config '{0}' to '{1}'".format(key, value))
try:
value = json.loads(value)
except ValueError:
self.log_message(u"Could not parse JSON: {0}".format(value))
self.send_response(400)
else:
self.server.config[key] = value
self.send_response(200)
# No parameters sent to configure, so return success by default
else:
self.send_response(200)
else:
self.send_response(404)
def send_response(self, status_code, content=None, headers=None):
"""
Send a response back to the client with the HTTP `status_code` (int),
`content` (str) and `headers` (dict).
"""
self.log_message(
"Sent HTTP response: {0} with content '{1}' and headers {2}".format(status_code, content, headers)
)
if headers is None:
headers = dict()
BaseHTTPRequestHandler.send_response(self, status_code)
for (key, value) in headers.items():
self.send_header(key, value)
if len(headers) > 0:
self.end_headers()
if content is not None:
self.wfile.write(content)
def send_json_response(self, content):
"""
Send a response with status code 200, the given content serialized as
JSON, and the Content-Type header set appropriately
"""
self.send_response(200, json.dumps(content), {"Content-Type": "application/json"})
def _format_msg(self, format_str, *args):
"""
Format message for logging.
`format_str` is a string with old-style Python format escaping;
`args` is an array of values to fill into the string.
"""
return u"{0} - - [{1}] {2}\n".format(
self.client_address[0],
self.log_date_time_string(),
format_str % args
)
class StubHttpService(HTTPServer, object):
"""
Stub HTTP service implementation.
"""
# Subclasses override this to provide the handler class to use.
# Should be a subclass of `StubHttpRequestHandler`
HANDLER_CLASS = StubHttpRequestHandler
def __init__(self, port_num=0):
"""
Configure the server to listen on localhost.
Default is to choose an arbitrary open port.
"""
address = ('0.0.0.0', port_num)
HTTPServer.__init__(self, address, self.HANDLER_CLASS)
# Create a dict to store configuration values set by the client
self.config = dict()
# Start the server in a separate thread
server_thread = threading.Thread(target=self.serve_forever)
server_thread.daemon = True
server_thread.start()
# Log the port we're using to help identify port conflict errors
LOGGER.debug('Starting service on port {0}'.format(self.port))
def shutdown(self):
"""
Stop the server and free up the port
"""
# First call superclass shutdown()
HTTPServer.shutdown(self)
# We also need to manually close the socket
self.socket.close()
@property
def port(self):
"""
Return the port that the service is listening on.
"""
_, port = self.server_address
return port
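# Illustrative sketch (not part of the original module): starting the stub and
# pushing configuration to it through the /set_config endpoint handled by
# ``do_PUT`` above.  Assumes the ``requests`` package is available; each value
# is JSON-encoded, which is exactly what the handler expects.
def _demo_configure_stub():
    import json
    import requests
    service = StubHttpService()  # port 0 picks an arbitrary free port
    url = "http://127.0.0.1:{0}/set_config".format(service.port)
    requests.put(url, data={"mode": json.dumps("fake_success")})
    assert service.config["mode"] == "fake_success"
    service.shutdown()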
| agpl-3.0 | 758,772,310,948,993,500 | 30.052045 | 110 | 0.565066 | false |
tensorflow/models | official/vision/detection/utils/object_detection/shape_utils.py | 1 | 3608 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils used to manipulate tensor shapes."""
import tensorflow as tf
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
all(isinstance(dim, int) for dim in shape_b)):
if shape_a != shape_b:
raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
else:
return tf.no_op()
else:
return tf.assert_equal(shape_a, shape_b)
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(input=tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
def pad_or_clip_nd(tensor, output_shape):
"""Pad or Clip given tensor to the output shape.
Args:
tensor: Input tensor to pad or clip.
output_shape: A list of integers / scalar tensors (or None for dynamic dim)
representing the size to pad or clip each dimension of the input tensor.
Returns:
Input tensor padded and clipped to the output shape.
"""
tensor_shape = tf.shape(input=tensor)
clip_size = [
tf.where(tensor_shape[i] - shape > 0, shape, -1)
if shape is not None else -1 for i, shape in enumerate(output_shape)
]
clipped_tensor = tf.slice(
tensor, begin=tf.zeros(len(clip_size), dtype=tf.int32), size=clip_size)
# Pad tensor if the shape of clipped tensor is smaller than the expected
# shape.
clipped_tensor_shape = tf.shape(input=clipped_tensor)
trailing_paddings = [
shape - clipped_tensor_shape[i] if shape is not None else 0
for i, shape in enumerate(output_shape)
]
paddings = tf.stack(
[tf.zeros(len(trailing_paddings), dtype=tf.int32), trailing_paddings],
axis=1)
padded_tensor = tf.pad(tensor=clipped_tensor, paddings=paddings)
output_static_shape = [
dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
]
padded_tensor.set_shape(output_static_shape)
return padded_tensor
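# Illustrative sketch (not part of the original module): padding a 2x2 tensor
# up to 3x4 and clipping a 5x5 tensor down to 3x4 with ``pad_or_clip_nd``,
# assuming TF2 eager execution.  The helper name is made up.
def _demo_pad_or_clip_nd():
  padded = pad_or_clip_nd(tf.ones([2, 2]), output_shape=[3, 4])
  assert padded.shape.as_list() == [3, 4]
  clipped = pad_or_clip_nd(tf.ones([5, 5]), output_shape=[3, 4])
  assert clipped.shape.as_list() == [3, 4]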
| apache-2.0 | 5,864,020,762,902,091,000 | 32.719626 | 79 | 0.709812 | false |
edgedb/edgedb | edb/errors/base.py | 1 | 5179 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import *
from edb.common import context as pctx
from edb.common import exceptions as ex
__all__ = (
'EdgeDBError', 'EdgeDBMessage',
)
class EdgeDBErrorMeta(type):
_error_map: Dict[int, Type[EdgeDBError]] = {}
def __new__(mcls, name, bases, dct):
cls = super().__new__(mcls, name, bases, dct)
code = dct.get('_code')
if code is not None:
mcls._error_map[code] = cls
return cls
def __init__(cls, name, bases, dct):
if cls._code is None and cls.__module__ != __name__:
# We don't want any EdgeDBError subclasses to not
# have a code.
raise RuntimeError(
'direct subclassing of EdgeDBError is prohibited; '
'subclass one of its subclasses in edb.errors')
@classmethod
def get_error_class_from_code(mcls, code):
return mcls._error_map[code]
class EdgeDBMessage(Warning):
_code: Optional[int] = None
@classmethod
def get_code(cls):
if cls._code is None:
raise RuntimeError(
f'EdgeDB message code is not set (type: {cls.__name__})')
return cls._code
class EdgeDBError(Exception, metaclass=EdgeDBErrorMeta):
_code: Optional[int] = None
_attrs: Dict[int, str]
    def __init__(self, msg: Optional[str] = None, *,
                 hint: Optional[str] = None, details: Optional[str] = None,
                 context=None, position: Optional[Tuple[int, int, int]] = None,
                 token=None):
if type(self) is EdgeDBError:
raise RuntimeError(
'EdgeDBError is not supposed to be instantiated directly')
self.token = token
self._attrs = {}
if isinstance(context, pctx.ParserContext):
self.set_source_context(context)
elif position:
self.set_position(*position)
self.set_hint_and_details(hint, details)
super().__init__(msg)
@classmethod
def get_code(cls):
if cls._code is None:
raise RuntimeError(
f'EdgeDB message code is not set (type: {cls.__name__})')
return cls._code
def set_linecol(self, line, col):
self._attrs[FIELD_LINE_START] = str(line)
self._attrs[FIELD_COLUMN_START] = str(col)
def set_hint_and_details(self, hint, details=None):
ex.replace_context(
self, ex.DefaultExceptionContext(hint=hint, details=details))
if hint is not None:
self._attrs[FIELD_HINT] = hint
if details is not None:
self._attrs[FIELD_DETAILS] = details
def set_source_context(self, context):
start = context.start_point
end = context.end_point
ex.replace_context(self, context)
self._attrs[FIELD_POSITION_START] = str(start.offset)
self._attrs[FIELD_POSITION_END] = str(end.offset)
self._attrs[FIELD_CHARACTER_START] = str(start.char_offset)
self._attrs[FIELD_CHARACTER_END] = str(end.char_offset)
self._attrs[FIELD_LINE_START] = str(start.line)
self._attrs[FIELD_COLUMN_START] = str(start.column)
self._attrs[FIELD_UTF16_COLUMN_START] = str(start.utf16column)
self._attrs[FIELD_LINE_END] = str(end.line)
self._attrs[FIELD_COLUMN_END] = str(end.column)
self._attrs[FIELD_UTF16_COLUMN_END] = str(end.utf16column)
def set_position(self, line: int, column: int, pointer: int):
self.set_linecol(line, column)
self._attrs[FIELD_POSITION_START] = str(pointer)
self._attrs[FIELD_POSITION_END] = str(pointer)
@property
def line(self):
return int(self._attrs.get(FIELD_LINE_START, -1))
@property
def col(self):
return int(self._attrs.get(FIELD_COLUMN_START, -1))
@property
def position(self):
return int(self._attrs.get(FIELD_POSITION_START))
@property
def hint(self):
return self._attrs.get(FIELD_HINT)
@property
def details(self):
return self._attrs.get(FIELD_DETAILS)
FIELD_HINT = 0x_00_01
FIELD_DETAILS = 0x_00_02
FIELD_SERVER_TRACEBACK = 0x_01_01
# XXX: Subject to be changed/deprecated.
FIELD_POSITION_START = 0x_FF_F1
FIELD_POSITION_END = 0x_FF_F2
FIELD_LINE_START = 0x_FF_F3
FIELD_COLUMN_START = 0x_FF_F4
FIELD_UTF16_COLUMN_START = 0x_FF_F5
FIELD_LINE_END = 0x_FF_F6
FIELD_COLUMN_END = 0x_FF_F7
FIELD_UTF16_COLUMN_END = 0x_FF_F8
FIELD_CHARACTER_START = 0x_FF_F9
FIELD_CHARACTER_END = 0x_FF_FA
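# Hedged illustration, not part of this module: concrete error types are
# declared in edb.errors and register themselves with EdgeDBErrorMeta through
# their ``_code``; the class name and code value below are made up.
#
#     class ExampleQueryError(EdgeDBError):
#         _code = 0x_01_00_00_00
#
# EdgeDBErrorMeta.get_error_class_from_code(0x_01_00_00_00) then resolves a
# numeric code received over the wire back to ExampleQueryError.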
| apache-2.0 | 4,119,359,837,803,341,000 | 29.28655 | 79 | 0.630044 | false |
jorsea/odoo-addons | product_template_search_by_ean13/__openerp__.py | 8 | 1460 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'author': 'ADHOC SA',
'category': 'base.module_category_knowledge_management',
'demo_xml': [],
'depends': ['product'],
'description': """
Product Template Search by EAN 13
==================================
""",
'installable': True,
'license': 'AGPL-3',
'name': 'Product Template Search by EAN 13',
'test': [],
'data': [
'product_view.xml',
],
'website': 'www.adhoc.com.ar'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -2,418,287,399,865,728,000 | 36.435897 | 78 | 0.576712 | false |
hperala/kontuwikibot | pywikibot/families/wikibooks_family.py | 1 | 7380 | # -*- coding: utf-8 -*-
"""Family module for Wikibooks."""
from __future__ import unicode_literals
from pywikibot import family
__version__ = '$Id: 389c4f09d94b853d26a91486d518694d3d1ca38e $'
# The Wikimedia family that is known as Wikibooks
class Family(family.WikimediaFamily):
"""Family class for Wikibooks."""
closed_wikis = [
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Afar_Wikibooks
'aa',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Akan_Wikibooks
'ak',
# https://als.wikipedia.org/wiki/Wikipedia:Stammtisch/Archiv_2008-1#Afterwards.2C_closure_and_deletion_of_Wiktionary.2C_Wikibooks_and_Wikiquote_sites
'als',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Assamese_Wikibooks
'as',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Asturianu_Wikibooks
'ast',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Aymar_Wikibooks
'ay',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bashkir_Wikibooks
'ba',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bislama_Wikibooks
'bi',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Bambara_Wikibooks
'bm',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Tibetan_Wikibooks
'bo',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Chamorro_Wikibooks
'ch',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Corsu_Wikibooks
'co',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Gaeilge_Wikibooks
'ga',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Gothic_Wikibooks
'got',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Guarani_Wikibooks
'gn',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Gujarati_Wikibooks
'gu',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Kannada_Wikibooks
'kn',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Kashmiri_Wikibooks
'ks',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_L%C3%ABtzebuergesch_Wikibooks
'lb',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Lingala_Wikibooks
'ln',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Latvian_Wikibooks
'lv',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Maori_Wikibooks
'mi',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Mongolian_Wikibooks
'mn',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Burmese_Wikibooks
'my',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Nauruan_Wikibooks
'na',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Nahuatl_Wikibooks
'nah',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Plattd%C3%BC%C3%BCtsch_Wikibooks
'nds',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Pashto_Wikibooks
'ps',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Quechua_Wikibooks
'qu',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Rumantsch_Wikibooks
'rm',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Sami_Wikibooks
'se',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Simple_English_Wikibooks_(3)
'simple',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Basa_Sunda_Wikibooks_(2)
'su',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Swahili_Wikibooks
'sw',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Turkmen_Wikibooks
'tk',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Uyghur_Wikibooks
'ug',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Volap%C3%BCk_Wikibooks
'vo',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Walon_Wikibooks
'wa',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Xhosa_Wikibooks
'xh',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Yoruba_Wikibooks
'yo',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Zhuang_Wikibooks
'za',
# https://meta.wikimedia.org/wiki/Proposals_for_closing_projects/Closure_of_Zulu_Wikibooks
'zu',
]
removed_wikis = [
'tokipona',
]
def __init__(self):
"""Constructor."""
super(Family, self).__init__()
self.name = 'wikibooks'
self.languages_by_size = [
'en', 'de', 'fr', 'hu', 'ja', 'it', 'es', 'pt', 'nl', 'pl', 'he',
'vi', 'ca', 'id', 'sq', 'fi', 'ru', 'fa', 'cs', 'zh', 'sv', 'hr',
'tr', 'ro', 'sr', 'ar', 'no', 'th', 'ko', 'gl', 'da', 'ta', 'mk',
'az', 'tl', 'is', 'ka', 'lt', 'tt', 'uk', 'eo', 'bg', 'sk', 'sl',
'el', 'hy', 'ms', 'sa', 'si', 'li', 'la', 'ml', 'ur', 'bn', 'ang',
'ia', 'cv', 'et', 'hi', 'km', 'mr', 'eu', 'oc', 'kk', 'fy', 'ne',
'ie', 'te', 'af', 'tg', 'ky', 'bs', 'pa', 'be', 'mg', 'cy',
'zh-min-nan', 'ku', 'uz',
]
self.langs = dict([(lang, '%s.wikibooks.org' % lang)
for lang in self.languages_by_size])
# Global bot allowed languages on
# https://meta.wikimedia.org/wiki/Bot_policy/Implementation#Current_implementation
self.cross_allowed = [
'af', 'ang', 'ca', 'fa', 'fy', 'it', 'nl', 'ru', 'th', 'zh',
]
# Which languages have a special order for putting interlanguage links,
# and what order is it? If a language is not in interwiki_putfirst,
# alphabetical order on language code is used. For languages that are in
# interwiki_putfirst, interwiki_putfirst is checked first, and
# languages are put in the order given there. All other languages are
# put after those, in code-alphabetical order.
self.interwiki_putfirst = {
'en': self.alphabetic,
'fi': self.alphabetic,
'fr': self.alphabetic,
'he': ['en'],
'hu': ['en'],
'pl': self.alphabetic,
'simple': self.alphabetic
}
def shared_data_repository(self, code, transcluded=False):
"""Return the shared data repository for this family."""
return ('wikidata', 'wikidata')
| mit | -2,413,153,517,792,425,500 | 48.530201 | 157 | 0.626965 | false |
zstackio/zstack-woodpecker | integrationtest/vm/multihosts/volumes/paths/path155.py | 1 | 1061 | import zstackwoodpecker.test_state as ts_header
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template2",
path_list=[[TestAction.create_volume, "volume1", "=scsi"], \
[TestAction.attach_volume, "vm1", "volume1"], \
[TestAction.detach_volume, "volume1"], \
[TestAction.reboot_vm, "vm1"],\
[TestAction.create_volume_snapshot, "volume1", "snapshot1"], \
[TestAction.create_volume_snapshot, "volume1", "snapshot2"], \
[TestAction.create_volume_snapshot, "volume1", "snapshot3"], \
[TestAction.attach_volume, "vm1", "volume1"], \
[TestAction.create_volume_snapshot, "vm1-root", 'vm_snapshot1'], \
[TestAction.stop_vm, "vm1"],\
[TestAction.use_volume_snapshot, "snapshot1"], \
[TestAction.start_vm, "vm1"],\
[TestAction.batch_delete_volume_snapshot, ["snapshot2", "snapshot1"]], \
[TestAction.detach_volume, "volume1"], \
[TestAction.clone_vm, "vm1", "vm2", "=full"], \
[TestAction.ps_migrate_volume, "volume1"],\
[TestAction.delete_volume_snapshot, "snapshot3"], \
[TestAction.reboot_vm, "vm1"]])
| apache-2.0 | 2,246,609,339,134,716,000 | 47.227273 | 74 | 0.686145 | false |
thopiekar/Cura | cura/Settings/PerObjectContainerStack.py | 1 | 3365 | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, Optional
from UM.Application import Application
from UM.Decorators import override
from UM.Settings.Interfaces import PropertyEvaluationContext
from UM.Settings.SettingInstance import InstanceState
from .CuraContainerStack import CuraContainerStack
class PerObjectContainerStack(CuraContainerStack):
def isDirty(self):
# This stack should never be auto saved, so always return that there is nothing to save.
return False
@override(CuraContainerStack)
def getProperty(self, key: str, property_name: str, context: Optional[PropertyEvaluationContext] = None) -> Any:
if context is None:
context = PropertyEvaluationContext()
context.pushContainer(self)
global_stack = Application.getInstance().getGlobalContainerStack()
if not global_stack:
return None
# Return the user defined value if present, otherwise, evaluate the value according to the default routine.
if self.getContainer(0).hasProperty(key, property_name):
if self.getContainer(0).getProperty(key, "state") == InstanceState.User:
result = super().getProperty(key, property_name, context)
context.popContainer()
return result
# Handle the "limit_to_extruder" property.
limit_to_extruder = super().getProperty(key, "limit_to_extruder", context)
if limit_to_extruder is not None:
limit_to_extruder = str(limit_to_extruder)
# if this stack has the limit_to_extruder "not overridden", use the original limit_to_extruder as the current
# limit_to_extruder, so the values retrieved will be from the perspective of the original limit_to_extruder
# stack.
if limit_to_extruder == "-1":
if "original_limit_to_extruder" in context.context:
limit_to_extruder = context.context["original_limit_to_extruder"]
if limit_to_extruder is not None and limit_to_extruder != "-1" and limit_to_extruder in global_stack.extruders:
# set the original limit_to_extruder if this is the first stack that has a non-overridden limit_to_extruder
if "original_limit_to_extruder" not in context.context:
context.context["original_limit_to_extruder"] = limit_to_extruder
if super().getProperty(key, "settable_per_extruder", context):
result = global_stack.extruders[str(limit_to_extruder)].getProperty(key, property_name, context)
if result is not None:
context.popContainer()
return result
result = super().getProperty(key, property_name, context)
context.popContainer()
return result
@override(CuraContainerStack)
def setNextStack(self, stack: CuraContainerStack) -> None:
super().setNextStack(stack)
# trigger signal to re-evaluate all default settings
for key in self.getContainer(0).getAllKeys():
# only evaluate default settings
if self.getContainer(0).getProperty(key, "state") != InstanceState.Default:
continue
self._collectPropertyChanges(key, "value")
self._emitCollectedPropertyChanges()
| lgpl-3.0 | -3,890,242,072,327,734,000 | 44.472973 | 119 | 0.671322 | false |
LinuxChristian/home-assistant | tests/components/lock/test_demo.py | 22 | 1480 | """The tests for the Demo lock platform."""
import unittest
from homeassistant.setup import setup_component
from homeassistant.components import lock
from tests.common import get_test_home_assistant
FRONT = 'lock.front_door'
KITCHEN = 'lock.kitchen_door'
class TestLockDemo(unittest.TestCase):
"""Test the demo lock."""
def setUp(self): # pylint: disable=invalid-name
"""Setup things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.assertTrue(setup_component(self.hass, lock.DOMAIN, {
'lock': {
'platform': 'demo'
}
}))
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
def test_is_locked(self):
"""Test if lock is locked."""
self.assertTrue(lock.is_locked(self.hass, FRONT))
self.hass.states.is_state(FRONT, 'locked')
self.assertFalse(lock.is_locked(self.hass, KITCHEN))
self.hass.states.is_state(KITCHEN, 'unlocked')
def test_locking(self):
"""Test the locking of a lock."""
lock.lock(self.hass, KITCHEN)
self.hass.block_till_done()
self.assertTrue(lock.is_locked(self.hass, KITCHEN))
def test_unlocking(self):
"""Test the unlocking of a lock."""
lock.unlock(self.hass, FRONT)
self.hass.block_till_done()
self.assertFalse(lock.is_locked(self.hass, FRONT))
| apache-2.0 | -5,968,172,991,669,416,000 | 28.6 | 65 | 0.629054 | false |
sorenh/cc | vendor/Twisted-10.0.0/doc/core/examples/dbcred.py | 3 | 6879 | #!/usr/bin/env python
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Simple example of a db checker: defines an L{ICredentialsChecker} implementation
that deals with a database backend to authenticate a user.
"""
from twisted.cred import error
from twisted.cred.credentials import IUsernameHashedPassword, IUsernamePassword
from twisted.cred.checkers import ICredentialsChecker
from twisted.internet.defer import Deferred
from zope.interface import implements
class DBCredentialsChecker(object):
"""
This class checks the credentials of incoming connections
against a user table in a database.
"""
implements(ICredentialsChecker)
def __init__(self, runQuery,
query="SELECT username, password FROM user WHERE username = %s",
customCheckFunc=None, caseSensitivePasswords=True):
"""
@param runQuery: This will be called to get the info from the db.
Generally you'd want to create a
            L{twisted.enterprise.adbapi.ConnectionPool} and pass its runQuery
method here. Otherwise pass a function with the same prototype.
@type runQuery: C{callable}
        @param query: query used to authenticate user.
        @type query: C{str}
@param customCheckFunc: Use this if the passwords in the db are stored
as hashes. We'll just call this, so you can do the checking
yourself. It takes the following params:
(username, suppliedPass, dbPass) and must return a boolean.
@type customCheckFunc: C{callable}
        @param caseSensitivePasswords: If true, requires that every letter in
            C{credentials.password} is exactly the same case as its
counterpart letter in the database.
This is only relevant if C{customCheckFunc} is not used.
@type caseSensitivePasswords: C{bool}
"""
self.runQuery = runQuery
self.caseSensitivePasswords = caseSensitivePasswords
self.customCheckFunc = customCheckFunc
# We can't support hashed password credentials if we only have a hash
# in the DB
if customCheckFunc:
self.credentialInterfaces = (IUsernamePassword,)
else:
self.credentialInterfaces = (
IUsernamePassword, IUsernameHashedPassword,)
self.sql = query
def requestAvatarId(self, credentials):
"""
Authenticates the kiosk against the database.
"""
# Check that the credentials instance implements at least one of our
# interfaces
for interface in self.credentialInterfaces:
if interface.providedBy(credentials):
break
else:
raise error.UnhandledCredentials()
# Ask the database for the username and password
dbDeferred = self.runQuery(self.sql, (credentials.username,))
# Setup our deferred result
deferred = Deferred()
dbDeferred.addCallbacks(self._cbAuthenticate, self._ebAuthenticate,
callbackArgs=(credentials, deferred),
errbackArgs=(credentials, deferred))
return deferred
def _cbAuthenticate(self, result, credentials, deferred):
"""
Checks to see if authentication was good. Called once the info has
been retrieved from the DB.
"""
if len(result) == 0:
# Username not found in db
deferred.errback(error.UnauthorizedLogin('Username unknown'))
else:
username, password = result[0]
if self.customCheckFunc:
# Let the owner do the checking
if self.customCheckFunc(
username, credentials.password, password):
deferred.callback(credentials.username)
else:
deferred.errback(
error.UnauthorizedLogin('Password mismatch'))
else:
# It's up to us or the credentials object to do the checking
# now
if IUsernameHashedPassword.providedBy(credentials):
# Let the hashed password checker do the checking
if credentials.checkPassword(password):
deferred.callback(credentials.username)
else:
deferred.errback(
error.UnauthorizedLogin('Password mismatch'))
elif IUsernamePassword.providedBy(credentials):
                    # Compare the passwords, deciding whether or not to use
# case sensitivity
                    if self.caseSensitivePasswords:
                        passOk = password == credentials.password
                    else:
                        passOk = (
                            password.lower() == credentials.password.lower())
# See if they match
if passOk:
deferred.callback(credentials.username)
else:
deferred.errback(
error.UnauthorizedLogin('Password mismatch'))
else:
# OK, we don't know how to check this
deferred.errback(error.UnhandledCredentials())
def _ebAuthenticate(self, message, credentials, deferred):
"""
The database lookup failed for some reason.
"""
deferred.errback(error.LoginFailed(message))
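def sha256CheckFunc(username, suppliedPass, dbPass):
    """
    Hedged example of a C{customCheckFunc}, not part of the original recipe:
    it assumes the password column holds a hex-encoded SHA-256 digest of the
    password. Pass it in as
    C{DBCredentialsChecker(runQuery, customCheckFunc=sha256CheckFunc)}.
    """
    import hashlib
    return hashlib.sha256(suppliedPass).hexdigest() == dbPass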
def main():
"""
Run a simple echo pb server to test the checker. It defines a custom query
for dealing with sqlite special quoting, but otherwise it's a
straightforward use of the object.
You can test it running C{pbechoclient.py}.
"""
import sys
from twisted.python import log
log.startLogging(sys.stdout)
import os
if os.path.isfile('testcred'):
os.remove('testcred')
from twisted.enterprise import adbapi
pool = adbapi.ConnectionPool('pysqlite2.dbapi2', 'testcred')
# Create the table that will be used
query1 = """CREATE TABLE user (
username string,
password string
)"""
# Insert a test user
query2 = """INSERT INTO user VALUES ('guest', 'guest')"""
def cb(res):
pool.runQuery(query2)
pool.runQuery(query1).addCallback(cb)
checker = DBCredentialsChecker(pool.runQuery,
query="SELECT username, password FROM user WHERE username = ?")
from twisted.cred.portal import Portal
import pbecho
from twisted.spread import pb
portal = Portal(pbecho.SimpleRealm())
portal.registerChecker(checker)
reactor.listenTCP(pb.portno, pb.PBServerFactory(portal))
if __name__ == "__main__":
from twisted.internet import reactor
reactor.callWhenRunning(main)
reactor.run()
| apache-2.0 | -7,243,590,942,366,937,000 | 37.430168 | 79 | 0.615933 | false |
mancoast/CPythonPyc_test | cpython/223_test_math.py | 5 | 6022 | # Python test set -- math module
# XXXX Should not do tests around zero only
from test_support import *
seps='1e-05'
eps = eval(seps)
print 'math module, testing with eps', seps
import math
def testit(name, value, expected):
if abs(value-expected) > eps:
raise TestFailed, '%s returned %f, expected %f'%\
(name, value, expected)
print 'constants'
testit('pi', math.pi, 3.1415926)
testit('e', math.e, 2.7182818)
print 'acos'
testit('acos(-1)', math.acos(-1), math.pi)
testit('acos(0)', math.acos(0), math.pi/2)
testit('acos(1)', math.acos(1), 0)
print 'asin'
testit('asin(-1)', math.asin(-1), -math.pi/2)
testit('asin(0)', math.asin(0), 0)
testit('asin(1)', math.asin(1), math.pi/2)
print 'atan'
testit('atan(-1)', math.atan(-1), -math.pi/4)
testit('atan(0)', math.atan(0), 0)
testit('atan(1)', math.atan(1), math.pi/4)
print 'atan2'
testit('atan2(-1, 0)', math.atan2(-1, 0), -math.pi/2)
testit('atan2(-1, 1)', math.atan2(-1, 1), -math.pi/4)
testit('atan2(0, 1)', math.atan2(0, 1), 0)
testit('atan2(1, 1)', math.atan2(1, 1), math.pi/4)
testit('atan2(1, 0)', math.atan2(1, 0), math.pi/2)
print 'ceil'
testit('ceil(0.5)', math.ceil(0.5), 1)
testit('ceil(1.0)', math.ceil(1.0), 1)
testit('ceil(1.5)', math.ceil(1.5), 2)
testit('ceil(-0.5)', math.ceil(-0.5), 0)
testit('ceil(-1.0)', math.ceil(-1.0), -1)
testit('ceil(-1.5)', math.ceil(-1.5), -1)
print 'cos'
testit('cos(-pi/2)', math.cos(-math.pi/2), 0)
testit('cos(0)', math.cos(0), 1)
testit('cos(pi/2)', math.cos(math.pi/2), 0)
testit('cos(pi)', math.cos(math.pi), -1)
print 'cosh'
testit('cosh(0)', math.cosh(0), 1)
testit('cosh(2)-2*cosh(1)**2', math.cosh(2)-2*math.cosh(1)**2, -1) # Thanks to Lambert
print 'exp'
testit('exp(-1)', math.exp(-1), 1/math.e)
testit('exp(0)', math.exp(0), 1)
testit('exp(1)', math.exp(1), math.e)
print 'fabs'
testit('fabs(-1)', math.fabs(-1), 1)
testit('fabs(0)', math.fabs(0), 0)
testit('fabs(1)', math.fabs(1), 1)
print 'floor'
testit('floor(0.5)', math.floor(0.5), 0)
testit('floor(1.0)', math.floor(1.0), 1)
testit('floor(1.5)', math.floor(1.5), 1)
testit('floor(-0.5)', math.floor(-0.5), -1)
testit('floor(-1.0)', math.floor(-1.0), -1)
testit('floor(-1.5)', math.floor(-1.5), -2)
print 'fmod'
testit('fmod(10,1)', math.fmod(10,1), 0)
testit('fmod(10,0.5)', math.fmod(10,0.5), 0)
testit('fmod(10,1.5)', math.fmod(10,1.5), 1)
testit('fmod(-10,1)', math.fmod(-10,1), 0)
testit('fmod(-10,0.5)', math.fmod(-10,0.5), 0)
testit('fmod(-10,1.5)', math.fmod(-10,1.5), -1)
print 'frexp'
def testfrexp(name, (mant, exp), (emant, eexp)):
if abs(mant-emant) > eps or exp != eexp:
raise TestFailed, '%s returned %s, expected %s'%\
(name, `mant, exp`, `emant,eexp`)
testfrexp('frexp(-1)', math.frexp(-1), (-0.5, 1))
testfrexp('frexp(0)', math.frexp(0), (0, 0))
testfrexp('frexp(1)', math.frexp(1), (0.5, 1))
testfrexp('frexp(2)', math.frexp(2), (0.5, 2))
print 'hypot'
testit('hypot(0,0)', math.hypot(0,0), 0)
testit('hypot(3,4)', math.hypot(3,4), 5)
print 'ldexp'
testit('ldexp(0,1)', math.ldexp(0,1), 0)
testit('ldexp(1,1)', math.ldexp(1,1), 2)
testit('ldexp(1,-1)', math.ldexp(1,-1), 0.5)
testit('ldexp(-1,1)', math.ldexp(-1,1), -2)
print 'log'
testit('log(1/e)', math.log(1/math.e), -1)
testit('log(1)', math.log(1), 0)
testit('log(e)', math.log(math.e), 1)
print 'log10'
testit('log10(0.1)', math.log10(0.1), -1)
testit('log10(1)', math.log10(1), 0)
testit('log10(10)', math.log10(10), 1)
print 'modf'
def testmodf(name, (v1, v2), (e1, e2)):
    if abs(v1-e1) > eps or abs(v2-e2) > eps:
raise TestFailed, '%s returned %s, expected %s'%\
(name, `v1,v2`, `e1,e2`)
testmodf('modf(1.5)', math.modf(1.5), (0.5, 1.0))
testmodf('modf(-1.5)', math.modf(-1.5), (-0.5, -1.0))
print 'pow'
testit('pow(0,1)', math.pow(0,1), 0)
testit('pow(1,0)', math.pow(1,0), 1)
testit('pow(2,1)', math.pow(2,1), 2)
testit('pow(2,-1)', math.pow(2,-1), 0.5)
print 'sin'
testit('sin(0)', math.sin(0), 0)
testit('sin(pi/2)', math.sin(math.pi/2), 1)
testit('sin(-pi/2)', math.sin(-math.pi/2), -1)
print 'sinh'
testit('sinh(0)', math.sinh(0), 0)
testit('sinh(1)**2-cosh(1)**2', math.sinh(1)**2-math.cosh(1)**2, -1)
testit('sinh(1)+sinh(-1)', math.sinh(1)+math.sinh(-1), 0)
print 'sqrt'
testit('sqrt(0)', math.sqrt(0), 0)
testit('sqrt(1)', math.sqrt(1), 1)
testit('sqrt(4)', math.sqrt(4), 2)
print 'tan'
testit('tan(0)', math.tan(0), 0)
testit('tan(pi/4)', math.tan(math.pi/4), 1)
testit('tan(-pi/4)', math.tan(-math.pi/4), -1)
print 'tanh'
testit('tanh(0)', math.tanh(0), 0)
testit('tanh(1)+tanh(-1)', math.tanh(1)+math.tanh(-1), 0)
# RED_FLAG 16-Oct-2000 Tim
# While 2.0 is more consistent about exceptions than previous releases, it
# still fails this part of the test on some platforms. For now, we only
# *run* test_exceptions() in verbose mode, so that this isn't normally
# tested.
def test_exceptions():
print 'exceptions'
try:
x = math.exp(-1000000000)
except:
# mathmodule.c is failing to weed out underflows from libm, or
# we've got an fp format with huge dynamic range
raise TestFailed("underflowing exp() should not have raised "
"an exception")
if x != 0:
raise TestFailed("underflowing exp() should have returned 0")
# If this fails, probably using a strict IEEE-754 conforming libm, and x
# is +Inf afterwards. But Python wants overflows detected by default.
try:
x = math.exp(1000000000)
except OverflowError:
pass
else:
raise TestFailed("overflowing exp() didn't trigger OverflowError")
# If this fails, it could be a puzzle. One odd possibility is that
# mathmodule.c's macros are getting confused while comparing
# Inf (HUGE_VAL) to a NaN, and artificially setting errno to ERANGE
# as a result (and so raising OverflowError instead).
try:
x = math.sqrt(-1.0)
except ValueError:
pass
else:
raise TestFailed("sqrt(-1) didn't raise ValueError")
if verbose:
test_exceptions()
| gpl-3.0 | -3,319,627,338,319,984,000 | 29.882051 | 86 | 0.611425 | false |
MattNolanLab/ei-attractor | grid_cell_model/simulations/simulation_demo/default_params.py | 2 | 6183 | #
# default_params.py
#
# Default neuron and network parameters
#
# Copyright (C) 2012 Lukas Solanka <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
__all__ = ['defaultParameters']
_defaultOutputDir = "output/"
y_dim = np.sqrt(3)/2.
defaultParameters = {
"Ne" : 34,
"Ni" : 34,
"delay" : 0.1, # ms
"nthreads" : 1,
"printTime" : 0, # This is boolean
"ratVelFName" : '../../../data/hafting_et_al_2005/rat_trajectory_lowpass.mat',
"gridSep" : 60, # cm
"EI_flat" : 0, # bool
"IE_flat" : 0, # bool
"use_EE" : 0, # bool
"AMPA_gaussian" : 0, # bool
"pEE_sigma" : .05,
"pAMPA_mu" : y_dim/2.0,
"pAMPA_sigma" : 0.5/6,
"pGABA_mu" : y_dim/2.0,
"pGABA_sigma" : 0.5/6,
"prefDirC_e" : 4.0,
"prefDirC_ee" : 0.0,
"prefDirC_i" : 0.0,
"arenaSize" : 180.0, # cm
"NMDA_amount" : 2.0, # %
"C_Mg" : .0, # mM; def is no Vm dependence
"probabilistic_synapses": 0, # bool
"Iext_e_const" : 300.0, # pA
"Iext_i_const" : 200.0, # pA
"Iext_e_theta" : 375.0, # pA
"Iext_i_theta" : 25.0, # pA
"theta_start_t" : 0.5e3, # ms
"theta_freq" : 8, # Hz
"taum_e" : 9.3, # ms
"taum_e_spread" : 0.31, # ms
"EL_e" : -68.5, # mV
"EL_e_spread" : 0.20, # mV
"Vt_e" : -50, # mV
"Vr_e" : -68.5, # mV
"gL_e" : 22.73, # nS
"deltaT_e" : 0.4, # mV
"E_AHP_e" : -80, # mV
"tau_AHP_e" : 20, # ms
"g_AHP_e_max" : 5.0, # nS
"t_ref_e" : 0.1, # ms
"V_peak_e" : -40, # mV
"taum_i" : 10, # ms
"taum_i_spread" : 0, # ms
"EL_i" : -60, # mV
"EL_i_spread" : 0, # mV
"Vt_i" : -45, # mV
"Vr_i" : -60, # mV
"gL_i" : 22.73, # nS
"t_ref_i" : 0.1, # ms
"deltaT_i" : 0.4, # mV
"ad_tau_i_mean" : 7.5, # ms
"ad_tau_i_std" : 0.5, # ms, Unused in the simulation for now
"ad_i_g_inc" : 22.73, # nS
"V_peak_i" : -35, # mV
"tau_AMPA" : 1, # ms
"tau_NMDA_fall" : 100, # ms, only a single exponential used here
"g_AMPA_total" : 1400, # nS
"g_uni_AMPA_total" : 0, # nS
"uni_AMPA_density" : 0.001, # fraction
"tau_GABA_A_rise" : 0.1, # ms
"tau_GABA_A_fall" : 5, # ms
"g_GABA_total" : 2160, # nS
"g_uni_GABA_frac" : 0.013, # fraction of g_GABA_total
"uni_GABA_density" : 0.4,
"g_EI_uni_density" : .1, # Probability
"g_IE_uni_density" : .1, # Probability
"use_II" : 0, # bool
"g_II_total" : 50., # nS
"g_II_uni_density" : .1, # Probability
"E_AMPA" : 0, # mV
"E_GABA_A" : -75, # mV
"N_place_cells" : 30, # sqrt(total PC number)
"pc_max_rate" : 50.0, # Hz
"pc_conn_weight" : 0.5, # nS
"pc_field_std" : 20.0, # cm
"bumpCurrentSlope" : 0.53, # neurons/s/pA, !! this will depend on prefDirC !!
"pc_start_max_rate" : 100.0, # Hz
"pc_start_conn_weight" : 5.0, # nS
"ipc_ON" : 0, # bool
"ipc_N" : 30, # sqrt(total IPC number)
"ipc_nconn" : 10,
"ipc_field_std" : 20.0, # cm
"ipc_max_rate" : 50.0, # Hz
"ipc_weight" : 0.5, # nS
"noise_sigma" : 150.0, # pA
"gammaNSample" : 25, # No. of neurons
"sim_dt" : 0.1, # ms
"Vclamp" : -50, # mV
"ntrials" : 1,
"output_dir" : _defaultOutputDir,
"stateMonDur" : 20e3, # ms
}
| gpl-3.0 | -8,143,654,244,704,845,000 | 41.9375 | 100 | 0.336568 | false |
jhseu/tensorflow | tensorflow/compiler/aot/tests/make_test_graphs.py | 1 | 7775 | # Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate tensorflow graphs for testing tfcompile."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import six
from six.moves import range
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import app
from tensorflow.python.training import saver as saver_lib
FLAGS = None
def tfadd(_):
x = constant_op.constant([1], name='x_const')
y = constant_op.constant([2], name='y_const')
math_ops.add(x, y, name='x_y_sum')
def tfadd_with_ckpt(out_dir):
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = variables.VariableV1(constant_op.constant([0]), name='y_saved')
math_ops.add(x, y, name='x_y_sum')
init_op = variables.global_variables_initializer()
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
with session.Session() as sess:
sess.run(init_op)
sess.run(y.assign(y + 42))
# Without the checkpoint, the variable won't be set to 42.
ckpt = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt.ckpt')
saver.save(sess, ckpt)
def tfadd_with_ckpt_saver(out_dir):
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = variables.VariableV1(constant_op.constant([0]), name='y_saved')
math_ops.add(x, y, name='x_y_sum')
init_op = variables.global_variables_initializer()
saver = saver_lib.Saver(name='abcprefix', write_version=saver_pb2.SaverDef.V1)
with session.Session() as sess:
sess.run(init_op)
sess.run(y.assign(y + 42))
# Without the checkpoint, the variable won't be set to 42.
ckpt_file = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt_saver.ckpt')
saver.save(sess, ckpt_file)
# Without the SaverDef, the restore op won't be named correctly.
saver_file = os.path.join(out_dir, 'test_graph_tfadd_with_ckpt_saver.saver')
with open(saver_file, 'wb') as f:
f.write(six.ensure_binary(saver.as_saver_def().SerializeToString()))
def tfassert_eq(_):
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = array_ops.placeholder(dtypes.int32, name='y_hold')
control_flow_ops.Assert(
math_ops.equal(x, y), ['Expected x == y.'], name='assert_eq')
math_ops.add(x, math_ops.negative(y), name='x_y_diff')
def tfcond(_):
p = array_ops.placeholder(dtypes.bool, name='p_hold')
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = array_ops.placeholder(dtypes.int32, name='y_hold')
z = control_flow_ops.cond(p, lambda: x, lambda: y)
array_ops.identity(z, name='result')
def tfgather(_):
params = array_ops.placeholder(dtypes.float32, name='params')
indices = array_ops.placeholder(dtypes.int32, name='indices')
array_ops.gather(params, indices, name='gather_output')
def tfmatmul(_):
x = array_ops.placeholder(dtypes.float32, name='x_hold')
y = array_ops.placeholder(dtypes.float32, name='y_hold')
math_ops.matmul(x, y, name='x_y_prod')
def tfmatmulandadd(_):
# This tests multiple outputs.
x = array_ops.placeholder(dtypes.float32, name='x_hold')
y = array_ops.placeholder(dtypes.float32, name='y_hold')
math_ops.matmul(x, y, name='x_y_prod')
math_ops.add(x, y, name='x_y_sum')
def tffunction(_):
@function.Defun(dtypes.int32, dtypes.int32)
def test_func(a, b):
return a + b
x = constant_op.constant([1], name='x_const')
y = constant_op.constant([2], name='y_const')
test_func(x, y, name='func_call') # pylint: disable=unexpected-keyword-arg
def tfsplits(_):
"""A more complex graph, including splits."""
x = array_ops.placeholder(dtypes.float32, shape=[2, 2], name='x')
y = array_ops.placeholder(dtypes.float32, shape=[2, 2], name='y')
for _ in range(3):
x0, x1 = array_ops.split(x, 2, 0)
y0, y1 = array_ops.split(y, 2, 0)
x0 += 1
y0 += 1
z = math_ops.matmul(x, y, name='x_y_prod')
a = array_ops.concat([x0, y1], axis=0, name='concat_x0_y1')
b = array_ops.concat([y0, x1], axis=0, name='concat_y0_x1')
x = math_ops.matmul(a, b, name='a_b')
y = math_ops.add(x, z)
array_ops.identity(y, name='result')
def tftop_k(_):
x = array_ops.placeholder(dtypes.int32, shape=[5], name='x')
output = nn_ops.top_k(x, 2, name='values')
array_ops.identity(output[1], name='indices')
def tfvariable_readonly(_):
x = variables.Variable(1000.0, name='x')
old_x = x.value()
with ops.control_dependencies([old_x]):
new_value = math_ops.add(old_x, 42.0)
array_ops.identity(new_value, name='result')
# TODO(b/147908587): Change x and the two constants back to have a scalar shape
# when the bug is fixed.
def tfvariable(_):
x = variables.Variable([1000.0], name='x', shape=[1])
old_x = x.value()
with ops.control_dependencies([old_x]):
new_x = x.assign_add([42.0])
array_ops.stack([old_x, new_x], name='result')
def tfvariable_sequential_updates(_):
x = variables.Variable(1.0, name='x')
y = variables.Variable(1.0, name='y')
updates = control_flow_ops.no_op()
for _ in range(3):
with ops.control_dependencies([updates]):
x_val = x.read_value() + y
updates = x.assign_sub(0.1 * x_val)
array_ops.identity(updates, name='result')
def write_graph(build_graph, out_dir):
"""Build a graph using build_graph and write it out."""
g = ops.Graph()
with g.as_default():
build_graph(out_dir)
filename = os.path.join(out_dir, 'test_graph_%s.pb' % build_graph.__name__)
with open(filename, 'wb') as f:
f.write(six.ensure_binary(g.as_graph_def().SerializeToString()))
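# Illustrative note (not additional test logic): write_graph(tfadd, '/tmp/graphs')
# builds the tfadd graph in a fresh tf.Graph and serializes it to
# '/tmp/graphs/test_graph_tfadd.pb', the naming scheme the tfcompile test
# targets expect.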
def main(_):
control_flow_util.enable_control_flow_v2()
write_graph(tfadd, FLAGS.out_dir)
write_graph(tfadd_with_ckpt, FLAGS.out_dir)
write_graph(tfadd_with_ckpt_saver, FLAGS.out_dir)
write_graph(tfassert_eq, FLAGS.out_dir)
write_graph(tfcond, FLAGS.out_dir)
write_graph(tffunction, FLAGS.out_dir)
write_graph(tfgather, FLAGS.out_dir)
write_graph(tfmatmul, FLAGS.out_dir)
write_graph(tfmatmulandadd, FLAGS.out_dir)
write_graph(tfsplits, FLAGS.out_dir)
write_graph(tftop_k, FLAGS.out_dir)
write_graph(tfvariable, FLAGS.out_dir)
write_graph(tfvariable_readonly, FLAGS.out_dir)
write_graph(tfvariable_sequential_updates, FLAGS.out_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--out_dir',
type=str,
default='',
help='Output directory for graphs, checkpoints and savers.')
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | 4,341,346,972,946,254,000 | 33.709821 | 80 | 0.683344 | false |
openaid-IATI/OIPA | OIPA/api/transaction/tests/test_transaction_aggregation.py | 2 | 6552 | from decimal import Decimal
from django.test import TestCase
from rest_framework.test import APIClient
from iati.factory import iati_factory
from iati.transaction import factories as transaction_factory
class TransactionAggregationTestCase(TestCase):
def setUp(self):
"""
        Set up 2 activities, then create individual tests to check the most
        used aggregation / group-by combinations.
"""
first_activity = iati_factory.ActivityFactory.create()
second_activity = iati_factory.ActivityFactory.create(
iati_identifier='IATI-0002',
iati_standard_version=first_activity.iati_standard_version)
# transaction type = 1 (incoming funds), works the same for
# disbursements etc.
first_transaction = transaction_factory.TransactionFactory.create(
activity=first_activity,
value=50000)
second_transaction = transaction_factory.TransactionFactory.create(
activity=second_activity,
value=10000,
transaction_type=first_transaction.transaction_type)
third_transaction = transaction_factory.TransactionFactory.create(
activity=second_activity,
value=25000,
transaction_type=first_transaction.transaction_type)
first_sector = iati_factory.SectorFactory.create(
code=11000, name='Sector 1')
second_sector = iati_factory.SectorFactory.create(
code=11001, name='Sector 2')
# TODO: Create appropriate objects here - 2016-04-18
transaction_sector = transaction_factory.TransactionSectorFactory\
.create(
transaction=first_transaction,
sector=first_sector,
percentage=100
)
transaction_factory.TransactionSectorFactory.create(
transaction=second_transaction,
sector=first_sector,
percentage=50,
vocabulary=transaction_sector.vocabulary
)
transaction_factory.TransactionSectorFactory.create(
transaction=third_transaction,
sector=first_sector,
percentage=50,
vocabulary=transaction_sector.vocabulary
)
transaction_factory.TransactionSectorFactory.create(
transaction=second_transaction,
sector=second_sector,
percentage=50,
vocabulary=transaction_sector.vocabulary
)
transaction_factory.TransactionSectorFactory.create(
transaction=third_transaction,
sector=second_sector,
percentage=50,
vocabulary=transaction_sector.vocabulary
)
country = iati_factory.CountryFactory(code="AD", name="Andorra")
second_country = iati_factory.CountryFactory(
code="KE", name="Kenya"
)
transaction_factory.TransactionRecipientCountryFactory.create(
transaction=first_transaction,
country=country,
percentage=100
)
transaction_factory.TransactionRecipientCountryFactory.create(
transaction=second_transaction,
country=country,
percentage=50
)
transaction_factory.TransactionRecipientCountryFactory.create(
transaction=third_transaction,
country=country,
percentage=50
)
transaction_factory.TransactionRecipientCountryFactory.create(
transaction=second_transaction,
country=second_country,
percentage=50
)
transaction_factory.TransactionRecipientCountryFactory.create(
transaction=third_transaction,
country=second_country,
percentage=50
)
self.api_client = APIClient()
def get_results(self, group_by, aggregations, order_by, filter_name=None,
filter_value=None):
url_parts = [
'/api/transactions/aggregations/?format=json&group_by=',
group_by,
'&aggregations=',
aggregations,
'&order_by=',
order_by,
]
if filter_name:
url_parts.extend(['&', filter_name, '=', filter_value])
url = ''.join(url_parts)
response = self.api_client.get(url)
return list(response.data['results'])
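    # Illustrative example (not an extra test): get_results('sector',
    # 'incoming_fund', 'sector', 'recipient_country', 'KE') issues a GET to
    # '/api/transactions/aggregations/?format=json&group_by=sector'
    # '&aggregations=incoming_fund&order_by=sector&recipient_country=KE'.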
def test_sector_incoming_fund_group_by(self):
"""group incoming funds by sector (percentage weighted)
expected results:
sector 11000 = 67500 (t1 50000 + t2 5000 + t3 12500)
sector 11001 = 17500 (t2 5000 + t3 12500)
"""
results = self.get_results(
group_by='sector',
aggregations='incoming_fund',
order_by='sector')
self.assertTrue(len(results) == 2)
self.assertEqual(results[0]['incoming_fund'], Decimal(67500))
self.assertEqual(results[1]['incoming_fund'], Decimal(17500))
def test_sector_incoming_fund_group_by_with_recipient_country_filter(self):
"""group incoming funds by sector (percentage weighted)
expected results:
sector 11000 = 67500 (t1 50000 + t2 5000 + t3 12500)
sector 11001 = 17500 (t2 5000 + t3 12500)
country KE = 17500 (t1 0% + t2 50% + t3 50%)
so:
        sector 11000 with filter KE = 8750 (t1 0 + t2 2500 + t3 6250)
        sector 11001 with filter KE = 8750 (t1 0 + t2 2500 + t3 6250)
"""
results = self.get_results(
group_by='sector',
aggregations='incoming_fund',
order_by='sector',
filter_name='recipient_country',
filter_value='KE')
self.assertTrue(len(results) == 2)
self.assertEqual(results[0]['incoming_fund'], Decimal(8750))
self.assertEqual(results[1]['incoming_fund'], Decimal(8750))
def test_recipient_country_incoming_fund_group_by(self):
"""group incoming funds by recipient country (percentage weighted)
expected results:
country AD = 67500 (t1 50000 + t2 5000 + t3 12500)
country KE = 17500 (t2 5000 + t3 12500)
"""
results = self.get_results(
group_by='recipient_country',
aggregations='incoming_fund',
order_by='recipient_country')
self.assertTrue(len(results) == 2)
self.assertEqual(results[0]['incoming_fund'], Decimal(67500))
self.assertEqual(results[1]['incoming_fund'], Decimal(17500))
| agpl-3.0 | -4,018,049,694,509,657,600 | 35 | 79 | 0.609432 | false |
edofic/ggrc-core | test/integration/ggrc/services/test_custom_attributes.py | 6 | 11395 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for PUT and POST requests for objects with custom attributes
These tests include:
- Creating an object with custom attributes (POST request).
- Editing existing custom attributes on an object.
- Adding custom attributes to existing object.
"""
from ggrc import utils
from ggrc import models
from ggrc import builder
from integration.ggrc import services
from integration.ggrc.generator import ObjectGenerator
class ProductTestCase(services.TestCase):
"""Test case for Product post and put requests."""
def setUp(self):
services.TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def _put(self, url, data, extra_headers=None):
"""Perform a put request."""
headers = {'X-Requested-By': 'Unit Tests'}
    headers.update(extra_headers or {})
return self.client.put(
url,
content_type='application/json',
data=utils.as_json(data),
headers=headers,
)
def _post(self, data):
"""Perform a post request."""
return self.client.post(
"/api/products",
content_type='application/json',
data=utils.as_json(data),
headers={'X-Requested-By': 'Unit Tests'},
)
class TestGlobalCustomAttributes(ProductTestCase):
"""Tests for API updates for custom attribute values."""
def test_custom_attribute_post(self):
"""Test post object with custom attributes."""
gen = self.generator.generate_custom_attribute
_, cad = gen("product", attribute_type="Text", title="normal text")
pid = models.Person.query.first().id
product_data = [
{
"product": {
"kind": None,
"owners": [],
"custom_attribute_values": [{
"attribute_value": "my custom attribute value",
"custom_attribute_id": cad.id,
}],
"contact": {
"id": pid,
"href": "/api/people/{}".format(pid),
"type": "Person"
},
"title": "simple product",
"description": "",
"secondary_contact": None,
"notes": "",
"url": "",
"reference_url": "",
"slug": "",
"context": None
}
}
]
response = self._post(product_data)
ca_json = response.json[0][1]["product"]["custom_attribute_values"][0]
self.assertIn("attributable_id", ca_json)
self.assertIn("attributable_type", ca_json)
self.assertIn("attribute_value", ca_json)
self.assertIn("id", ca_json)
self.assertEqual(ca_json["attribute_value"],
"my custom attribute value")
product = models.Product.eager_query().first()
self.assertEqual(len(product.custom_attribute_values), 1)
self.assertEqual(
product.custom_attribute_values[0].attribute_value,
"my custom attribute value"
)
def test_custom_attribute_put_add(self):
"""Test edits with adding new CA values."""
gen = self.generator.generate_custom_attribute
_, cad = gen("product", attribute_type="Text", title="normal text")
pid = models.Person.query.first().id
product_data = [
{
"product": {
"kind": None,
"owners": [],
"contact": {
"id": pid,
"href": "/api/people/{}".format(pid),
"type": "Person"
},
"title": "simple product",
"description": "",
"secondary_contact": None,
"notes": "",
"url": "",
"reference_url": "",
"slug": "",
"context": None
}
}
]
response = self._post(product_data)
product_url = response.json[0][1]["product"]["selfLink"]
headers = self.client.get(product_url).headers
product_data[0]["product"]["custom_attribute_values"] = [{
"attribute_value": "added value",
"custom_attribute_id": cad.id,
}]
response = self._put(product_url, product_data[0], extra_headers={
'If-Unmodified-Since': headers["Last-Modified"],
'If-Match': headers["Etag"],
})
product = response.json["product"]
self.assertEqual(len(product["custom_attribute_values"]), 1)
ca_json = product["custom_attribute_values"][0]
self.assertIn("attributable_id", ca_json)
self.assertIn("attributable_type", ca_json)
self.assertIn("attribute_value", ca_json)
self.assertIn("id", ca_json)
self.assertEqual(ca_json["attribute_value"],
"added value")
product = models.Product.eager_query().first()
self.assertEqual(len(product.custom_attribute_values), 1)
self.assertEqual(
product.custom_attribute_values[0].attribute_value,
"added value"
)
headers = self.client.get(product_url).headers
product_data[0]["product"]["custom_attribute_values"] = [{
"attribute_value": "edited value",
"custom_attribute_id": cad.id,
}]
response = self._put(product_url, product_data[0], extra_headers={
'If-Unmodified-Since': headers["Last-Modified"],
'If-Match': headers["Etag"],
})
product = response.json["product"]
ca_json = product["custom_attribute_values"][0]
self.assertIn("attributable_id", ca_json)
self.assertIn("attributable_type", ca_json)
self.assertIn("attribute_value", ca_json)
self.assertIn("id", ca_json)
self.assertEqual(ca_json["attribute_value"],
"edited value")
def test_custom_attribute_get(self):
"""Check if get returns the whole CA value and not just the stub."""
gen = self.generator.generate_custom_attribute
_, cad = gen("product", attribute_type="Text", title="normal text")
pid = models.Person.query.first().id
product_data = [
{
"product": {
"kind": None,
"owners": [],
"custom_attribute_values": [{
"attribute_value": "my custom attribute value",
"custom_attribute_id": cad.id,
}],
"contact": {
"id": pid,
"href": "/api/people/{}".format(pid),
"type": "Person"
},
"title": "simple product",
"description": "",
"secondary_contact": None,
"notes": "",
"url": "",
"reference_url": "",
"slug": "",
"context": None
}
}
]
response = self._post(product_data)
product_url = response.json[0][1]["product"]["selfLink"]
get_response = self.client.get(product_url)
product = get_response.json["product"]
self.assertIn("custom_attribute_values", product)
self.assertEqual(len(product["custom_attribute_values"]), 1)
cav = product["custom_attribute_values"][0]
self.assertIn("custom_attribute_id", cav)
self.assertIn("attribute_value", cav)
self.assertIn("id", cav)
class TestOldApiCompatibility(ProductTestCase):
"""Test Legacy CA values API.
These tests check that the old way of setting custom attribute values still
  works and that if both ways are used, the legacy code is ignored.
"""
def test_custom_attribute_post_both(self):
"""Test post with both custom attribute api options.
    This test tries to set a custom attribute in both the new and the old way at
once. The old option should be ignored and the new value should be set.
"""
gen = self.generator.generate_custom_attribute
_, cad = gen("product", attribute_type="Text", title="normal text")
cad_json = builder.json.publish(cad.__class__.query.get(cad.id))
cad_json = builder.json.publish_representation(cad_json)
pid = models.Person.query.first().id
product_data = [
{
"product": {
"kind": None,
"owners": [],
"custom_attribute_definitions":[
cad_json,
],
"custom_attribute_values": [{
"attribute_value": "new value",
"custom_attribute_id": cad.id,
}],
"custom_attributes": {
cad.id: "old value",
},
"contact": {
"id": pid,
"href": "/api/people/{}".format(pid),
"type": "Person"
},
"title": "simple product",
"description": "",
"secondary_contact": None,
"notes": "",
"url": "",
"reference_url": "",
"slug": "",
"context": None
}
}
]
response = self._post(product_data)
ca_json = response.json[0][1]["product"]["custom_attribute_values"][0]
self.assertEqual(ca_json["attribute_value"], "new value")
product = models.Product.eager_query().first()
self.assertEqual(len(product.custom_attribute_values), 1)
self.assertEqual(
product.custom_attribute_values[0].attribute_value,
"new value"
)
def test_custom_attribute_post_old(self):
"""Test post with old style custom attribute values.
This tests that the legacy way of setting custom attribute values still
works.
"""
gen = self.generator.generate_custom_attribute
_, cad = gen("product", attribute_type="Text", title="normal text")
cad_json = builder.json.publish(cad.__class__.query.get(cad.id))
cad_json = builder.json.publish_representation(cad_json)
pid = models.Person.query.first().id
product_data = [
{
"product": {
"kind": None,
"owners": [],
"custom_attribute_definitions":[
cad_json,
],
"custom_attribute_values": [{
"id": 1,
"href": "/api/custom_attribute_values/1",
"type": "CustomAttributeValues"
}],
"custom_attributes": {
cad.id: "old value",
},
"contact": {
"id": pid,
"href": "/api/people/{}".format(pid),
"type": "Person"
},
"title": "simple product",
"description": "",
"secondary_contact": None,
"notes": "",
"url": "",
"reference_url": "",
"slug": "",
"context": None
}
}
]
response = self._post(product_data)
self.assert200(response)
ca_json = response.json[0][1]["product"]["custom_attribute_values"][0]
self.assertEqual(ca_json["attribute_value"], "old value")
product = models.Product.eager_query().first()
self.assertEqual(len(product.custom_attribute_values), 1)
self.assertEqual(
product.custom_attribute_values[0].attribute_value,
"old value"
)
| apache-2.0 | 3,621,041,103,040,772,000 | 32.125 | 78 | 0.537253 | false |
RamezIssac/django-datatable-view | ra_datatableview/views.py | 1 | 26039 | import simplejson as json
import re
import operator
import logging
try:
from functools import reduce
except ImportError:
pass
from django.views.generic.list import ListView, MultipleObjectMixin
from django.http import HttpResponse, HttpResponseBadRequest
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Model, Manager, Q
from django.utils.cache import add_never_cache_headers
from django.utils.text import smart_split
from django.views.decorators.csrf import ensure_csrf_cookie
from django.conf import settings
from django import get_version
import six
import dateutil.parser
from .forms import XEditableUpdateForm
from .utils import (FIELD_TYPES, ObjectListResult, DatatableOptions, split_real_fields,
filter_real_fields, get_datatable_structure, resolve_orm_path, get_first_orm_bit,
get_field_definition)
log = logging.getLogger(__name__)
CAN_UPDATE_FIELDS = get_version().split('.') >= ['1', '5']
class DatatableMixin(MultipleObjectMixin):
"""
Converts a view into an AJAX interface for obtaining records.
The normal GET execution adds a ``DataTable`` object to the context which can be used to
    streamline the dumping of the HTML skeleton required for datatables.js to hook into.  A ``DataTable``
object doesn't hold any data, just a structure superficially generated from the options on the
view.
The template is responsible for making the AJAX request back to this view to populate the table
with data.
"""
datatable_options = None
datatable_context_name = 'datatable'
def get(self, request, *args, **kwargs):
"""
Detects AJAX access and returns appropriate serialized data. Normal access to the view is
unmodified.
"""
if request.is_ajax() or request.GET.get('ajax') == 'true':
return self.get_ajax(request, *args, **kwargs)
return super(DatatableMixin, self).get(request, *args, **kwargs)
def get_object_list(self):
""" Gets the core queryset, but applies the datatable options to it. """
return self.apply_queryset_options(self.get_queryset())
def get_datatable_options(self):
"""
Returns the DatatableOptions object for this view's configuration.
This method is guaranteed to be called only once per request.
"""
return self.datatable_options
def _get_datatable_options(self):
"""
Internal safe access. Guarantees that ``get_datatable_options()`` is called only once, so
that subclasses can use that method to modify the class attribute ``datatable_options``.
"""
if not hasattr(self, '_datatable_options'):
if self.model is None:
self.model = self.get_queryset().model
options = self.get_datatable_options()
if options:
# Options are defined, but probably in a raw dict format
options = DatatableOptions(self.model, self.request.GET, **dict(options))
else:
# No options defined on the view
options = DatatableOptions(self.model, self.request.GET)
self._datatable_options = options
return self._datatable_options
def apply_queryset_options(self, queryset):
"""
Interprets the datatable options.
Options requiring manual massaging of the queryset are handled here. The output of this
method should be treated as a list, since complex options might convert it out of the
original queryset form.
"""
options = self._get_datatable_options()
        # These will hold residue queries that cannot be handled at the database level. Anything
# in these variables by the end will be handled manually (read: less efficiently)
sort_fields = []
searches = []
# This count is for the benefit of the frontend datatables.js
total_initial_record_count = 0 # queryset.count()
if options['ordering']:
db_fields, sort_fields = split_real_fields(self.model, options['ordering'])
queryset = queryset.order_by(*db_fields)
if options['search']:
db_fields, searches = filter_real_fields(self.model, options['columns'],
key=get_first_orm_bit)
db_fields.extend(options['search_fields'])
queries = [] # Queries generated to search all fields for all terms
search_terms = map(lambda q: q.strip("'\" "), smart_split(options['search']))
for term in search_terms:
term_queries = [] # Queries generated to search all fields for this term
# Every concrete database lookup string in 'columns' is followed to its trailing field descriptor. For example, "subdivision__name" terminates in a CharField. The field type determines how it is probed for search.
for column in db_fields:
column = get_field_definition(column)
for component_name in column.fields:
field_queries = [] # Queries generated to search this database field for the search term
field = resolve_orm_path(self.model, component_name)
if isinstance(field, tuple(FIELD_TYPES['text'])):
field_queries = [{component_name + '__icontains': term}]
elif isinstance(field, tuple(FIELD_TYPES['date'])):
try:
date_obj = dateutil.parser.parse(term)
except ValueError:
# This exception is theoretical, but it doesn't seem to raise.
pass
except TypeError:
# Failed conversions can lead to the parser adding ints to None.
pass
else:
field_queries.append({component_name: date_obj})
# Add queries for more granular date field lookups
try:
numerical_value = int(term)
except ValueError:
pass
else:
if 0 < numerical_value < 3000:
field_queries.append({component_name + '__year': numerical_value})
if 0 < numerical_value <= 12:
field_queries.append({component_name + '__month': numerical_value})
if 0 < numerical_value <= 31:
field_queries.append({component_name + '__day': numerical_value})
elif isinstance(field, tuple(FIELD_TYPES['boolean'])):
if term.lower() in ('true', 'yes'):
term = True
elif term.lower() in ('false', 'no'):
term = False
else:
continue
field_queries = [{component_name: term}]
elif isinstance(field, tuple(FIELD_TYPES['integer'])):
try:
field_queries = [{component_name: int(term)}]
except ValueError:
pass
elif isinstance(field, tuple(FIELD_TYPES['float'])):
try:
field_queries = [{component_name: float(term)}]
except ValueError:
pass
elif isinstance(field, tuple(FIELD_TYPES['ignored'])):
pass
else:
raise ValueError("Unhandled field type for %s (%r) in search." % (component_name, type(field)))
# print field_queries
# Append each field inspection for this term
term_queries.extend(map(lambda q: Q(**q), field_queries))
# Append the logical OR of all field inspections for this term
if len(term_queries):
queries.append(reduce(operator.or_, term_queries))
# Apply the logical AND of all term inspections
if len(queries):
queryset = queryset.filter(reduce(operator.and_, queries))
# TODO: Remove "and not searches" from this conditional, since manual searches won't be done
if not sort_fields and not searches:
# We can shortcut and speed up the process if all operations are database-backed.
object_list = queryset
object_list._dtv_unpaged_total = 0 #queryset.count()
else:
object_list = ObjectListResult(queryset)
# # Manual searches
# # This is broken until it searches all items in object_list previous to the database
# # sort. That represents a runtime load that hits every row in code, rather than in the
# # database. If enabled, this would cripple performance on large datasets.
# if options['i_walk_the_dangerous_line_between_genius_and_insanity']:
# length = len(object_list)
# for i, obj in enumerate(reversed(object_list)):
# keep = False
# for column_info in searches:
# column_index = options['columns'].index(column_info)
# rich_data, plain_data = self.get_column_data(column_index, column_info, obj)
# for term in search_terms:
# if term.lower() in plain_data.lower():
# keep = True
# break
# if keep:
# break
#
# if not keep:
# removed = object_list.pop(length - 1 - i)
# # print column_info
# # print data
# # print '===='
# Sort the results manually for whatever remaining sort options are left over
def data_getter_orm(field_name):
def key(obj):
try:
return reduce(getattr, [obj] + field_name.split('__'))
except (AttributeError, ObjectDoesNotExist):
return None
return key
def data_getter_custom(i):
def key(obj):
rich_value, plain_value = self.get_column_data(i, options['columns'][i], obj)
return plain_value
return key
# Sort the list using the manual sort fields, back-to-front. `sort` is a stable
# operation, meaning that multiple passes can be made on the list using different
# criteria. The only catch is that the passes must be made in reverse order so that
# the "first" sort field with the most priority ends up getting applied last.
for sort_field in sort_fields[::-1]:
if sort_field.startswith('-'):
reverse = True
sort_field = sort_field[1:]
else:
reverse = False
if sort_field.startswith('!'):
key_function = data_getter_custom
sort_field = int(sort_field[1:])
else:
key_function = data_getter_orm
try:
object_list.sort(key=key_function(sort_field), reverse=reverse)
except TypeError as err:
log.error("Unable to sort on {0} - {1}".format(sort_field, err))
object_list._dtv_unpaged_total = len(object_list)
object_list._dtv_total_initial_record_count = total_initial_record_count
return object_list
def get_datatable_context_name(self):
return self.datatable_context_name
def get_datatable(self):
"""
Returns the helper object that can be used in the template to render the datatable skeleton.
"""
options = self._get_datatable_options()
return get_datatable_structure(self.request.path, options, model=self.model)
def get_context_data(self, **kwargs):
context = super(DatatableMixin, self).get_context_data(**kwargs)
context[self.get_datatable_context_name()] = self.get_datatable()
return context
# Ajax execution methods
def get_ajax(self, request, *args, **kwargs):
"""
Called in place of normal ``get()`` when accessed via AJAX.
"""
object_list = self.get_object_list()
total = object_list._dtv_total_initial_record_count
filtered_total = object_list._dtv_unpaged_total
response_data = self.get_json_response_object(object_list, total, filtered_total)
response = HttpResponse(self.serialize_to_json(response_data),
content_type="application/json")
#add_never_cache_headers(response)
return response
def get_json_response_object(self, object_list, total, filtered_total):
"""
Returns the JSON-compatible dictionary that will be serialized for an AJAX response.
        The value names use dataTables.js' Hungarian-style prefixes: "s~" for strings,
        "i~" for integers and "a~" for arrays; "aa~" means "array of arrays", and in
        some places the author uses "ao~" for "array of objects", an object being a
        JavaScript dictionary.
"""
object_list_page = self.paginate_object_list(object_list)
response_obj = {
'sEcho': self.request.GET.get('sEcho', None),
'iTotalRecords': total,
'iTotalDisplayRecords': filtered_total,
'aaData': [self.get_record_data(obj) for obj in object_list_page],
}
return response_obj
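    # For reference, the serialized response consumed by dataTables.js looks roughly
    # like this (values are illustrative):
    #
    #   {
    #       "sEcho": "3",
    #       "iTotalRecords": 500,
    #       "iTotalDisplayRecords": 42,
    #       "aaData": [
    #           {"DT_RowId": 7, "0": "First column", "1": "Second column"},
    #           ...
    #       ]
    #   }
    #
    # Each "aaData" row is a dict keyed by stringified column index, as built by
    # get_record_data() below.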
def paginate_object_list(self, object_list):
"""
If page_length is specified in the options or AJAX request, the result list is shortened to
the correct offset and length. Paged or not, the finalized object_list is then returned.
"""
options = self._get_datatable_options()
# Narrow the results to the appropriate page length for serialization
if options['page_length'] != -1:
i_begin = options['start_offset']
i_end = options['start_offset'] + options['page_length']
object_list = object_list[i_begin:i_end]
return object_list
def serialize_to_json(self, response_data):
""" Returns the JSON string for the compiled data object. """
indent = None
if settings.DEBUG:
indent = 4
        return json.dumps(response_data, indent=indent, use_decimal=True)
def get_record_data(self, obj):
"""
Returns a list of column data intended to be passed directly back to dataTables.js.
Each column generates a 2-tuple of data. [0] is the data meant to be displayed to the client
and [1] is the data in plain-text form, meant for manual searches. One wouldn't want to
include HTML in [1], for example.
"""
options = self._get_datatable_options()
data = {
'DT_RowId': obj.pk,
}
for i, name in enumerate(options['columns']):
column_data = self.get_column_data(i, name, obj)[0]
if six.PY2 and isinstance(column_data, str): # not unicode
column_data = column_data.decode('utf-8')
data[str(i)] = six.text_type(column_data)
return data
def get_column_data(self, i, name, instance):
""" Finds the backing method for column ``name`` and returns the generated data. """
column = get_field_definition(name)
is_custom, f = self._get_resolver_method(i, column)
if is_custom:
args, kwargs = self._get_preloaded_data(instance)
try:
kwargs['default_value'] = self._get_column_data_default(instance, column)[1]
except AttributeError:
kwargs['default_value'] = None
kwargs['field_data'] = name
kwargs['view'] = self
values = f(instance, *args, **kwargs)
else:
values = f(instance, column)
if not isinstance(values, (tuple, list)):
if six.PY2:
if isinstance(values, str): # not unicode
values = values.decode('utf-8')
else:
                values = six.text_type(values)  # `unicode` is not defined on Python 3
values = (values, re.sub(r'<[^>]+>', '', six.text_type(values)))
return values
def preload_record_data(self, instance):
"""
An empty hook for letting the view do something with ``instance`` before column lookups are
called against the object. The tuple of items returned will be passed as positional
arguments to any of the ``get_column_FIELD_NAME_data()`` methods.
"""
return ()
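    # Sketch of the hook in use (assumed names). Whatever is returned here is handed
    # to every custom column method, so expensive per-record work is done only once:
    #
    #   def preload_record_data(self, instance):
    #       return (instance.author.get_full_name(),)
    #
    #   def get_column_Author_data(self, instance, author_name, **kwargs):
    #       return author_name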
def _get_preloaded_data(self, instance):
"""
Fetches value from ``preload_record_data()``.
If a single value is returned and it is not a dict, list or tuple, it is made into a tuple.
The tuple will be supplied to the resolved method as ``*args``.
If the returned value is already a list/tuple, it will also be sent as ``*args``.
If the returned value is a dict, it will be sent as ``**kwargs``.
The two types cannot be mixed.
"""
preloaded_data = self.preload_record_data(instance)
if isinstance(preloaded_data, dict):
preloaded_args = ()
preloaded_kwargs = preloaded_data
elif isinstance(preloaded_data, (tuple, list)):
preloaded_args = preloaded_data
preloaded_kwargs = {}
else:
preloaded_args = (preloaded_data,)
preloaded_kwargs = {}
return preloaded_args, preloaded_kwargs
def _get_resolver_method(self, i, column):
"""
Using a slightly mangled version of the column's name (explained below) each column's value
is derived.
Each field can generate customized data by defining a method on the view called either
"get_column_FIELD_NAME_data" or "get_column_INDEX_data".
If the FIELD_NAME approach is used, the name is the raw field name (e.g., "street_name") or
else the friendly representation defined in a 2-tuple such as
("Street name", "subdivision__home__street_name"), where the name has non-alphanumeric
characters stripped to single underscores. For example, the friendly name
"Region: Subdivision Type" would convert to "Region_Subdivision_Type", requiring the method
name "get_column_Region_Subdivision_Type_data".
Alternatively, if the INDEX approach is used, a method will be fetched called
"get_column_0_data", or otherwise using the 0-based index of the column's position as
defined in the view's ``datatable_options['columns']`` setting.
Finally, if a third element is defined in the tuple, it will be treated as the function or
name of a member attribute which will be used directly.
"""
callback = column.callback
if callback:
if callable(callback):
return True, callback
return True, getattr(self, callback)
# Treat the 'nice name' as the starting point for looking up a method
name = column.pretty_name
if not name:
name = column.fields[0]
try:
mangled_name = re.sub(r'[\W_]+', '_', name)
        except Exception:
            mangled_name = 'unknown'
            log.exception("Could not build a column method name for %r in _get_resolver_method", name)
f = getattr(self, 'get_column_%s_data' % mangled_name, None)
if f:
return True, f
f = getattr(self, 'get_column_%d_data' % i, None)
if f:
return True, f
return False, self._get_column_data_default
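    # Resolution example (illustrative): for a column declared as
    # ("Region: Subdivision Type", 'region__subdivision_type'), either of the
    # following view methods would take precedence over the default ORM lookup:
    #
    #   def get_column_Region_Subdivision_Type_data(self, instance, *args, **kwargs):
    #       return instance.region.get_subdivision_type_display()
    #
    #   def get_column_0_data(self, instance, *args, **kwargs):
    #       # Keyed by the column's 0-based index instead of its mangled name.
    #       return kwargs['default_value']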
def _get_column_data_default(self, instance, column, *args, **kwargs):
""" Default mechanism for resolving ``column`` through the model instance ``instance``. """
def chain_lookup(obj, bit):
try:
value = getattr(obj, bit)
except (AttributeError, ObjectDoesNotExist):
value = None
else:
if callable(value):
if isinstance(value, Manager):
pass
elif not hasattr(value, 'alters_data') or value.alters_data is not True:
value = value()
return value
values = []
for field_name in column.fields:
value = reduce(chain_lookup, [instance] + field_name.split('__'))
if isinstance(value, Model):
value = six.text_type(value)
if value is not None:
values.append(value)
if len(values) == 1:
value = values[0]
else:
value = u' '.join(map(six.text_type, values))
return value, value
class XEditableMixin(object):
xeditable_form_class = XEditableUpdateForm
xeditable_fieldname_param = 'xeditable_field' # GET parameter name used for choices ajax
def get(self, request, *args, **kwargs):
""" Introduces the ``ensure_csrf_cookie`` decorator and handles xeditable choices ajax. """
if request.GET.get(self.xeditable_fieldname_param):
return self.get_ajax_xeditable_choices(request, *args, **kwargs)
# Doing this in the method body at runtime instead of at declaration-time helps prevent
# collisions of other subclasses also trying to decorate their own get() methods.
method = super(XEditableMixin, self).get
method = ensure_csrf_cookie(method)
return method(request, *args, **kwargs)
def get_ajax_xeditable_choices(self, request, *args, **kwargs):
""" AJAX GET handler for xeditable queries asking for field choice lists. """
field_name = request.GET[self.xeditable_fieldname_param]
if not self.model:
self.model = self.get_queryset().model
# Sanitize the requested field name by limiting valid names to the datatable_options columns
columns = self._get_datatable_options()['columns']
for name in columns:
if isinstance(name, (list, tuple)):
name = name[1]
if name == field_name:
break
else:
return HttpResponseBadRequest()
field = self.model._meta.get_field_by_name(field_name)[0]
choices = self.get_field_choices(field, field_name)
return HttpResponse(json.dumps(choices))
def post(self, request, *args, **kwargs):
self.object_list = None
form = self.get_xeditable_form(self.get_xeditable_form_class())
if form.is_valid():
obj = self.get_update_object(form)
if obj is None:
data = json.dumps({
'status': 'error',
'message': "Object does not exist."
})
return HttpResponse(data, content_type="application/json", status=404)
return self.update_object(form, obj)
else:
data = json.dumps({
'status': 'error',
'message': "Invalid request",
'form_errors': form.errors,
})
return HttpResponse(data, content_type="application/json", status=400)
def get_xeditable_form_class(self):
return self.xeditable_form_class
def get_xeditable_form_kwargs(self):
kwargs = {
'model': self.get_queryset().model,
}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
})
return kwargs
def get_xeditable_form(self, form_class):
return form_class(**self.get_xeditable_form_kwargs())
def get_update_object(self, form):
"""
Retrieves the target object based on the update form's ``pk`` and the table's queryset.
"""
pk = form.cleaned_data['pk']
queryset = self.get_queryset()
try:
obj = queryset.get(pk=pk)
except queryset.model.DoesNotExist:
obj = None
return obj
def update_object(self, form, obj):
""" Saves the new value to the target object. """
field_name = form.cleaned_data['name']
value = form.cleaned_data['value']
setattr(obj, field_name, value)
save_kwargs = {}
if CAN_UPDATE_FIELDS:
save_kwargs['update_fields'] = [field_name]
obj.save(**save_kwargs)
data = json.dumps({
'status': 'success',
})
return HttpResponse(data, content_type="application/json")
def get_field_choices(self, field, field_name):
""" Returns the valid choices for ``field``. ``field_name`` is given for convenience. """
if self.request.GET.get('select2'):
names = ['id', 'text']
else:
names = ['value', 'text']
return [dict(zip(names, choice)) for choice in field.choices]
class DatatableView(DatatableMixin, ListView):
pass
class XEditableDatatableView(XEditableMixin, DatatableView):
pass
| apache-2.0 | 446,043,822,910,801,660 | 39.685938 | 231 | 0.568148 | false |
saintbird/django-cms | cms/test_utils/fixtures/navextenders.py | 46 | 1057 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.api import create_page
from cms.models.pagemodel import Page
class NavextendersFixture(object):
def create_fixtures(self):
"""
Tree from fixture:
page1
page2
page3
page4
page5
"""
defaults = {
'template': 'nav_playground.html',
'language': 'en',
}
with self.settings(CMS_PERMISSION=False):
p1 = create_page('page1', published=True, in_navigation=True, **defaults)
p4 = create_page('page4', published=True, in_navigation=True, **defaults)
p1 = Page.objects.get(pk=p1.pk)
p2 = create_page('page2', published=True, in_navigation=True, parent=p1, **defaults)
create_page('page3', published=True, in_navigation=True, parent=p2, **defaults)
p4 = Page.objects.get(pk=p4.pk)
create_page('page5', published=True, in_navigation=True, parent=p4, **defaults)
| bsd-3-clause | 1,900,532,495,443,964,700 | 35.448276 | 96 | 0.566698 | false |
BurntSushi/nflfan | nflfan/provider.py | 1 | 24616 | from __future__ import absolute_import, division, print_function
from collections import namedtuple
import json
import os
import re
import sys
import time
import requests
from bs4 import BeautifulSoup
import nfldb
import nflfan.config
__pdoc__ = {}
_user_agent = 'Mozilla/5.0 (X11; Linux x86_64)'
# _user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2498.0 Safari/537.36'
# _user_agent = ''
"""
The user agent string is heuristically determined. Namely, I was having
problems getting some providers to authenticate with more vague user
agent strings.
You may want to use a different user agent string entirely if you're
writing your own provider.
"""
_urls = {
'yahoo': {
'owner': 'http://football.fantasysports.yahoo.com/f1/%s/teams',
'matchup': 'http://football.fantasysports.yahoo.com/f1/%s/'
'matchup?matchup_week=%d&ajaxrequest=1',
'roster': 'http://football.fantasysports.yahoo.com/f1/%s/%s?week=%d',
},
'espn': {
'owner': 'http://games.espn.go.com/ffl/leaguesetup'
'/ownerinfo?leagueId={league_id}&seasonId={season_id}',
'matchup': 'http://games.espn.go.com/ffl/scoreboard?'
'leagueId={league_id}&matchupPeriodId={week}'
'&seasonId={season_id}',
'roster': 'http://games.espn.go.com/ffl/playertable/prebuilt/'
'manageroster?leagueId={league_id}&teamId={team_id}'
'&seasonId={season_id}&scoringPeriodId={week}'
'&view=overview&context=clubhouse'
'&ajaxPath=playertable/prebuilt/manageroster'
'&managingIr=false&droppingPlayers=false&asLM=false',
},
}
def pp(soup):
print(soup.prettify().encode('utf-8'))
def eprint(*args, **kwargs):
kwargs['file'] = sys.stderr
args = ['[nflfan]'] + list(args)
print(*args, **kwargs)
def player_search(db, full_name, team=None, position=None):
"""
A thin wrapper around `nfldb.player_search` that tries searching
with `team` or `position` when given, but if no results are found,
then this returns the results of a search with just the full name.
This allows for a slightly out-of-date database to still provide
a match while also disambiguating players with the same name.
"""
if position not in nfldb.Enums.player_pos:
position = None
p, _ = nfldb.player_search(db, full_name, team=team, position=position)
if p is None and position is not None:
p, _ = nfldb.player_search(db, full_name, team=team, position=None)
if p is None and team is not None:
p, _ = nfldb.player_search(db, full_name, team=None, position=position)
if p is None and team is not None and position is not None:
p, _ = nfldb.player_search(db, full_name, team=None, position=None)
return p
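# Illustrative usage (not part of the original module); assumes an open nfldb
# connection and reasonably current player metadata:
#
#   db = nfldb.connect()
#   qb = player_search(db, 'Russell Wilson', team='SEA', position='QB')
#   # Falls back to a name-only search if the team/position filters are stale.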
class League (namedtuple('League',
'season phase ident prov_name name scoring conf')):
__pdoc__['League.season'] = \
"""The year of the NFL season for this league."""
__pdoc__['League.phase'] = \
"""The phase of the season: preseason, regular or post."""
__pdoc__['League.ident'] = \
"""
A unique identifier for this league. The type and format of
this value is provider dependent.
"""
__pdoc__['League.prov_name'] = \
"""The name of the provider for this league."""
__pdoc__['League.name'] = \
"""The name of this league from the configuration."""
__pdoc__['League.scoring'] = \
"""The `nflfan.ScoreSchema` for this league."""
__pdoc__['League.conf'] = \
"""
A dictionary of configuration settings. The keys and values in
this dictionary are provider dependent.
"""
def __init__(self, *args):
super(League, self).__init__(*args)
self._cache = {}
@property
def full_name(self):
return '%s.%s' % (self.prov_name, self.name)
def is_me(self, obj):
if not self.conf.get('me', None):
return False
if isinstance(obj, Roster):
return self.is_me(obj.owner)
elif isinstance(obj, Matchup):
return self.is_me(obj.owner1) or self.is_me(obj.owner2)
else:
return self.conf['me'].lower() in obj.name.lower()
def me(self, objs):
for obj in objs:
if self.is_me(obj):
return obj
return None
def owners(self, week):
return self._cached(week, 'owners')
def owner(self, week, ident):
for o in self.owners(week):
if o.ident == ident:
return o
return None
def matchups(self, week):
return self._cached(week, 'matchups')
def matchup(self, week, ident):
for m in self.matchups(week):
if m.owner1 is None or m.owner2 is None:
continue
if m.owner1.ident == ident or m.owner2.ident == ident:
return m
return None
def rosters(self, week):
return self._cached(week, 'rosters')
def roster(self, week, ident):
for r in self.rosters(week):
if r.owner.ident == ident:
return r
return None
def cache_path(self, week):
return os.path.join(nflfan.config.cache_path(),
str(self.season), str(self.phase), str(week),
self.full_name + '.json')
def _cached(self, week, key):
if week not in self._cache:
self._load(week)
return self._cache[week][key]
def _load(self, week):
raw = None
fp = self.cache_path(week)
try:
with open(fp) as f:
raw = json.load(f)
except IOError:
raise IOError(
"No cached data for week %d in %s could be found at %s\n"
"Have you run `nflfan-update --week %d` yet?"
% (week, self.full_name, fp, week))
d = {'owners': [], 'matchups': [], 'rosters': []}
for owner in raw['owners']:
d['owners'].append(Owner._make(owner))
for matchup in raw['matchups']:
o1 = None if matchup[0] is None else Owner._make(matchup[0])
o2 = None if matchup[1] is None else Owner._make(matchup[1])
d['matchups'].append(Matchup(o1, o2))
for roster in raw['rosters']:
o = Owner._make(roster[0])
r = Roster(o, roster[1], roster[2], [])
for rp in roster[3]:
r.players.append(RosterPlayer._make(rp))
d['rosters'].append(r)
self._cache[week] = d
def __str__(self):
return self.full_name
class Matchup (namedtuple('Matchup', 'owner1 owner2')):
__pdoc__['Matchup.owner1'] = \
"""
One of the two teams in this matchup represented as an
`nflfan.Owner` object.
"""
__pdoc__['Matchup.owner2'] = \
"""
One of the two teams in this matchup represented as an
`nflfan.Owner` object.
"""
def other(self, ident):
"""
Given an identifier for one of the owner's in this matchup,
return the `nflfan.Owner` of the other owner.
"""
assert ident in (self.owner1.ident, self.owner2.ident)
if ident == self.owner1.ident:
return self.owner2
else:
return self.owner1
def __str__(self):
return '%s vs. %s' % (self.owner1, self.owner2)
class Owner (namedtuple('Owner', 'ident name')):
__pdoc__['Owner.ident'] = \
"""
A unique identifier corresponding to this owner. The type
of this value is provider-dependent.
"""
__pdoc__['Owner.name'] = \
"""A string representing the name of this owner."""
def __str__(self):
return self.name
class Roster (namedtuple('Roster', 'owner season week players')):
__pdoc__['Roster.owner'] = \
"""
A `nflfan.Owner` object corresponding to the owner of this
roster.
"""
__pdoc__['Roster.players'] = \
"""
A list of `nflfan.RosterPlayer` objects corresponding to the
set of players on this roster.
"""
def new_player(self, pos, team, bench, player_id):
"""
A convenience method for creating a new `nflfan.RosterPlayer`
given the current roster.
"""
return RosterPlayer(pos, team, bench, self.season, self.week,
None, 0.0, None, player_id)
@property
def active(self):
return filter(lambda rp: not rp.bench, self.players)
@property
def benched(self):
return filter(lambda rp: rp.bench, self.players)
@property
def points(self):
"""Returns the total number of points for non-benched players."""
return sum(p.points for p in self.players if not p.bench)
def __str__(self):
s = []
for rp in self.players:
s.append(str(rp))
return '\n'.join(s)
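# Sketch of building a roster by hand (hypothetical identifiers); provider
# implementations do this inside roster() via the new_player() helper:
#
#   r = Roster(owner, 2013, week, [])
#   qb = player_search('Russell Wilson', team='SEA', position='QB')
#   r.players.append(r.new_player('QB', 'SEA', False, qb.player_id))
#   r.players.append(r.new_player('DEF', 'SEA', False, None))  # a team defense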
class RosterPlayer (
namedtuple('RosterPlayer',
'position team bench season week '
'game points player player_id')):
__pdoc__['RosterPlayer.position'] = \
"""
A string corresponding to the position of the roster spot
occupied by this player. The possible values of this string are
provider dependent.
"""
__pdoc__['RosterPlayer.team'] = \
"""
        The abbreviation of the team this player belongs to. It must be a
valid nfldb team abbreviation and *cannot* be `UNK`.
"""
__pdoc__['RosterPlayer.bench'] = \
"""A boolean indicating whether this is a bench position or not."""
__pdoc__['RosterPlayer.season'] = \
"""The year of the corresponding NFL season."""
__pdoc__['RosterPlayer.week'] = \
"""The week number in which this roster was set."""
__pdoc__['RosterPlayer.game'] = \
"""
The `nfldb.Game` object for the game that this player played
in. If this roster position corresponds to a bye week, then
this attribute is set to `None`.
"""
__pdoc__['RosterPlayer.points'] = \
"""The total fantasy points for this roster player."""
__pdoc__['RosterPlayer.player'] = \
"""
A `nfldb.Player` object corresponding to this roster player.
This attribute is `None` by default, and is always `None` for
roster players corresponding to entire teams (e.g., defense).
"""
__pdoc__['RosterPlayer.player_id'] = \
"""
A player id string corresponding to the player in this roster
position and a player in nfldb. This may be `None` when the
roster player corresponds to an entire team. (e.g., A defense.)
"""
@property
def is_empty(self):
return self.team is None and self.player_id is None
@property
def is_defense(self):
return self.team is not None and self.player_id is None
@property
def is_player(self):
return self.player_id is not None
@property
def id(self):
if self.is_empty:
return 'Empty'
elif self.is_defense:
return self.team
else:
return self.player_id
@property
def name(self):
return self.id if not self.player else self.player.full_name
def __str__(self):
if self.game is not None and self.game.is_playing:
playing = '*'
else:
playing = ' '
return '%-6s %-4s %-20s %s%0.2f' \
% (self.position, self.team, self.name, playing, self.points)
class Provider (object):
"""
This class describes the interface that each fantasy football
provider must implement so that it can work with nflfan. In other
words, this is an abstract base class that should **not** be
instantiated directly.
All public members of this class must also be defined in each
provider implementation, including the class variables.
"""
provider_name = None
"""The name of the provider used in the configuration file."""
conf_required = ['scoring', 'league_name', 'season', 'phase', 'league_id']
"""A list of fields required for every provider."""
conf_optional = ['me']
"""A list of fields that are optional for every provider."""
def __init__(self, lg):
self._lg = lg
self._session = requests.Session()
self._session.headers.update(getattr(self, '_headers', {}))
def owners(self):
"""Returns a list of `nflfan.Owner` objects."""
assert False, 'subclass responsibility'
def matchups(self, week):
"""
Given a week number, this returns a list of `nflfan.Matchup`
objects describing the head-to-head matchups for `week`.
"""
assert False, 'subclass responsibility'
def roster(self, player_search, owner, week):
"""
Given a `nflfan.Owner` and a week number, this returns a
`nflfan.Roster` object. The `nflfan.Roster` contains a list of
`nfldb.Player` objects and their corresponding position on the
roster.
`player_search` should be a function that takes a full
player name and returns the closest matching player as a
`nfldb.Player` object. It should also optionally take keyword
arguments `team` and `position` that allow for extra filtering.
Note that the roster position is a string but the set of
possible values is provider dependent. It is used for display
purposes only.
"""
assert False, 'subclass responsibility'
def save(self, fp, player_search, week):
"""
Writes a JSON encoding of all the owners, matchups and rosters
for the given week to a file at `fp`.
`player_search` should be a function that takes a full
player name and returns the closest matching player as a
`nfldb.Player` object. It should also optionally take keyword
arguments `team` and `position` that allow for extra filtering.
"""
d = {
'owners': self.owners(),
'matchups': self.matchups(week),
}
# I'm hoping this doesn't hurt custom providers that don't need
# to do IO to fetch a roster.
def roster(owner):
return self.roster(player_search, owner, week)
# pool = multiprocessing.pool.ThreadPool(3)
# d['rosters'] = pool.map(roster, d['owners'])
d['rosters'] = map(roster, d['owners'])
try:
os.makedirs(os.path.dirname(fp))
except OSError:
pass
json.dump(d, open(fp, 'w+'))
def _request(self, url):
eprint('download %s' % url)
r = self._session.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
if self._login_form(soup):
self._login()
r = self._session.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
if self._login_form(soup):
raise IOError("Authentication failure.")
return r
def _login(self):
assert self._login_url is not None
soup = BeautifulSoup(self._session.get(self._login_url).text,
'html.parser')
if not self._login_form(soup):
# Already logged in!
return
form = self._login_form(soup)
params = self._login_params(soup)
for inp in soup.select('#hiddens input[type="hidden"]'):
params[inp['name']] = inp['value']
r = self._session.post('https://login.yahoo.com' + form['action'],
params=params)
return BeautifulSoup(r.text, 'html.parser')
    def _login_params(self, soup):
assert False, 'subclass responsibility'
def _login_form(self, soup):
assert False, 'subclass responsibility'
def __str__(self):
return self.__class__.provider_name
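# Minimal skeleton of a custom provider (illustrative only; the provider name,
# owners and players below are assumptions). The Yahoo and ESPN classes that
# follow are complete implementations of the same interface:
#
#   class MyLeagueSite(Provider):
#       provider_name = 'myleaguesite'
#       conf_required = []
#       conf_optional = []
#
#       def owners(self):
#           return [Owner('1', 'Team Foo'), Owner('2', 'Team Bar')]
#
#       def matchups(self, week):
#           o1, o2 = self.owners()
#           return [Matchup(o1, o2)]
#
#       def roster(self, player_search, owner, week):
#           r = Roster(owner, self._lg.season, week, [])
#           p = player_search('Drew Brees', team='NO', position='QB')
#           r.players.append(r.new_player('QB', 'NO', False, p.player_id))
#           return r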
class Yahoo (Provider):
provider_name = 'yahoo'
conf_required = []
conf_optional = ['username', 'password']
_headers = {'User-Agent': _user_agent}
_login_url = 'https://login.yahoo.com/config/login'
def __init__(self, lg):
super(Yahoo, self).__init__(lg)
_, _, self._league_num = self._lg.ident.split('.')
def owners(self):
match_owner_link = re.compile('team-[0-9]+-name')
url = _urls['yahoo']['owner'] % self._league_num
soup = BeautifulSoup(self._request(url).text, 'html.parser')
owners = []
for link in soup.find_all(id=match_owner_link):
ident = self._owner_id_from_url(link['href'])
owners.append(Owner(ident, link.text.strip()))
return owners
def matchups(self, week):
mk_owner = lambda div: Owner(owner_id(div.a['href']), div.text.strip())
owner_id = self._owner_id_from_url
url = _urls['yahoo']['matchup'] % (self._league_num, week)
rjson = self._request(url).json()
soup = BeautifulSoup(rjson['content'], 'html.parser')
matchups = []
for matchup in soup.find('ul').children:
pair = list(matchup.find_all('div', class_='Fz-sm'))
if len(pair) == 1:
matchups.append(Matchup(mk_owner(pair[0]), None))
else:
matchups.append(Matchup(mk_owner(pair[0]), mk_owner(pair[1])))
return matchups
def roster(self, player_search, owner, week):
def to_pos(row):
return row.td.find(class_='pos-label')['data-pos'].strip().upper()
def to_name(row):
return row.find(class_='ysf-player-name').a.text.strip()
def to_team(row):
team_pos = row.find(class_='ysf-player-name').span.text.strip()
            return nfldb.standard_team(re.search(r'^\S+', team_pos).group(0))
def rplayer(r, name, team, pos):
bench = pos == 'BN'
if name is None and team is None:
return r.new_player(pos, None, bench, None)
elif nfldb.standard_team(name) != 'UNK':
return r.new_player(pos, team, bench, None)
else:
player = player_search(name, team=team, position=pos)
return r.new_player(pos, team, bench, player.player_id)
match_table_id = re.compile('^statTable[0-9]+$')
url = _urls['yahoo']['roster'] % (self._league_num, owner.ident, week)
soup = BeautifulSoup(self._request(url).text, 'html.parser')
roster = Roster(owner, self._lg.season, week, [])
for table in soup.find_all(id=match_table_id):
for row in table.tbody.find_all('tr', recursive=False):
pos = to_pos(row)
try:
team, name = to_team(row), to_name(row)
roster.players.append(rplayer(roster, name, team, pos))
except AttributeError:
roster.players.append(rplayer(roster, None, None, pos))
return roster
def _owner_id_from_url(self, url):
return re.search('%s/([0-9]+)' % self._league_num, url).group(1)
def _login(self):
soup = super(Yahoo, self)._login()
if self._login_form(soup):
err_div = soup.find(id='mbr-login-error')
err_msg = 'Unknown error.'
if err_div:
err_msg = err_div.text.strip()
raise IOError('Login failed: %s' % err_msg)
def _login_params(self, soup):
return {
'username': self._lg.conf.get('username', ''),
'passwd': self._lg.conf.get('password', ''),
'signin': '',
# '.persistent': 'y',
'countrycode': '1',
# '_crumb': '8cSELfo475z',
# '_ts': str(int(time.time())),
# '_format': '',
# '_uuid': 'Q9JF85iYg9ax',
# '_seqid': '2',
# 'otp_channel': '',
}
def _login_form(self, soup):
return soup.find('form', id='mbr-login-form')
class ESPN (Provider):
provider_name = 'espn'
conf_required = []
conf_optional = ['username', 'password']
_headers = {'User-Agent': _user_agent}
_login_url = 'http://games.espn.go.com/ffl/signin?_=_'
def owners(self):
url = _urls['espn']['owner'].format(
league_id=self._lg.ident, season_id=self._lg.season)
soup = BeautifulSoup(self._request(url).text, 'html.parser')
owners = []
for td in soup.select('tr.ownerRow td.teamName'):
ident = self._owner_id_from_url(td.a['href'])
owners.append(Owner(ident, td.text.strip()))
return owners
def matchups(self, week):
owner_id = self._owner_id_from_url
url = _urls['espn']['matchup'].format(
league_id=self._lg.ident, season_id=self._lg.season, week=week)
soup = BeautifulSoup(self._request(url).text, 'html.parser')
matchupDiv = soup.find(id='scoreboardMatchups')
matchups = []
for table in matchupDiv.select('table.matchup'):
t1, t2 = list(table.find_all(class_='name'))
id1, id2 = owner_id(t1.a['href']), owner_id(t2.a['href'])
name1, name2 = t1.a.text.strip(), t2.a.text.strip()
o1, o2 = Owner(id1, name1), Owner(id2, name2)
matchups.append(Matchup(o1, o2))
return matchups
def roster(self, player_search, owner, week):
def to_pos(row):
pos = row.find(class_='playerSlot').text.strip().upper()
if pos == 'BENCH':
return 'BN'
return pos
def to_name(row):
name = row.find(class_='playertablePlayerName').a.text.strip()
# If this is the defense, apparently 'D/ST' is included in
# the name. Wtf?
            return re.sub(r'\s+D/ST$', '', name)
def to_team(row):
tpos = row.find(class_='playertablePlayerName').a.next_sibling
tpos = tpos.strip(' \r\n\t*,|').upper()
# This is a little weird because the team name seems to run
# in with the position. Perhaps a weird encoding quirk?
if len(tpos) < 2:
return 'UNK'
elif len(tpos) == 2:
return nfldb.standard_team(tpos)
else:
team = nfldb.standard_team(tpos[0:3])
if team == 'UNK':
team = nfldb.standard_team(tpos[0:2])
return team
def rplayer(r, name, team, pos):
bench = pos == 'BN'
name_team = nfldb.standard_team(name)
if name is None and team is None:
return r.new_player(pos, None, bench, None)
elif name_team != 'UNK':
return r.new_player(pos, name_team, bench, None)
else:
player = player_search(name, team=team, position=pos)
return r.new_player(pos, team, bench, player.player_id)
url = _urls['espn']['roster'].format(
league_id=self._lg.ident, season_id=self._lg.season, week=week,
team_id=owner.ident)
soup = BeautifulSoup(self._request(url).text, 'html.parser')
roster = Roster(owner, self._lg.season, week, [])
for tr in soup.select('tr.pncPlayerRow'):
if tr.get('id', '') == 'pncEmptyRow':
continue
pos = to_pos(tr)
try:
team, name = to_team(tr), to_name(tr)
roster.players.append(rplayer(roster, name, team, pos))
except AttributeError:
roster.players.append(rplayer(roster, None, None, pos))
return roster
def _owner_id_from_url(self, url):
return re.search('teamId=([0-9]+)', url).group(1)
def _login(self):
soup = super(ESPN, self)._login()
if self._login_form(soup):
err_msg = []
for msg in soup.find_all('font', color='#ff0000'):
err_msg.append(msg.text.strip())
err_msg = '\n'.join(err_msg) if err_msg else 'Unknown error.'
raise IOError('Login failed: %s' % err_msg)
    def _login_params(self, soup):
return {
'username': self._lg.conf.get('username', ''),
'password': self._lg.conf.get('password', ''),
'submit': 'Sign In',
}
def _login_form(self, soup):
return soup.find('form', attrs={'name': 'loginForm'})
| unlicense | 1,224,262,221,576,702,000 | 33.188889 | 121 | 0.56252 | false |