repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
huard/scipy-work | scipy/io/matlab/byteordercodes.py | 2 | 1780 | ''' Byteorder utilities for system - numpy byteorder encoding
Converts a variety of string codes for little endian, big endian,
native byte order and swapped byte order to explicit numpy endian
codes - one of '<' (little endian) or '>' (big endian)
'''
import sys
sys_is_le = sys.byteorder == 'little'
native_code = sys_is_le and '<' or '>'
swapped_code = sys_is_le and '>' or '<'
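# Note: `cond and a or b` is the pre-Python-2.5 ternary idiom; here it is
# equivalent to `'<' if sys_is_le else '>'` (safe because both alternatives
# are non-empty, truthy strings).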
aliases = {'little': ('little', '<', 'l', 'le'),
'big': ('big', '>', 'b', 'be'),
'native': ('native', '='),
'swapped': ('swapped', 'S')}
def to_numpy_code(code):
''' Convert various order codings to numpy format
Parameters
----------
code : {'little','big','l','b','le','be','<','>',
'native','=',
'swapped', 's'} string
code is converted to lower case before parsing
Returns
-------
out_code : {'<','>'} string
where '<' is the numpy dtype code for little
endian, and '>' is the code for big endian
Examples
--------
>>> import sys
>>> sys_is_le == (sys.byteorder == 'little')
True
>>> to_numpy_code('big')
'>'
>>> to_numpy_code('little')
'<'
>>> nc = to_numpy_code('native')
>>> nc == '<' if sys_is_le else nc == '>'
True
>>> sc = to_numpy_code('swapped')
>>> sc == '>' if sys_is_le else sc == '<'
True
'''
    if code is None:
        return native_code
    code = code.lower()
if code in aliases['little']:
return '<'
elif code in aliases['big']:
return '>'
elif code in aliases['native']:
return native_code
elif code in aliases['swapped']:
return swapped_code
else:
raise ValueError(
'We cannot handle byte order %s' % code)
| bsd-3-clause | -7,523,489,223,941,686,000 | 26.384615 | 65 | 0.524719 | false |
sopoforic/cgrr-mariospicross | marios_picross_puzzle_editor_gui.py | 1 | 6904 | # Classic Game Resource Reader (CGRR): Parse resources from classic games.
# Copyright (C) 2014-2015 Tracy Poff
#
# This file is part of CGRR.
#
# CGRR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CGRR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CGRR. If not, see <http://www.gnu.org/licenses/>.
"""GUI for editing Mario's Picross levels."""
import logging
from tkinter import (Tk, Frame, Canvas, Menu, Label, Button,
DISABLED, NORMAL, W, E, N, S)
from tkinter.filedialog import askopenfilename, asksaveasfile
import mariospicross
logging.basicConfig(level=logging.DEBUG)
class MainWindow(Frame):
def __init__(self, parent):
Frame.__init__(self, parent, background="white")
self.plugin = mariospicross
self.parent = parent
self.initUI()
def initUI(self):
self.parent.title("Mario's Picross Puzzle Editor")
self.puzzles = None
# Build the menu
menubar = Menu(self.parent)
self.parent.config(menu=menubar)
fileMenu = Menu(menubar)
self.fileMenu = fileMenu
fileMenu.add_command(label="Open Mario's Picross ROM...",
command=self.onOpen)
fileMenu.add_command(label="Save ROM as...",
command=self.onSave,
state=DISABLED)
fileMenu.add_separator()
fileMenu.add_command(label="Exit", command=self.onExit)
menubar.add_cascade(label="File", menu=fileMenu)
# Navigation
Label(self.parent).grid(row=0, column=0)
Label(self.parent).grid(row=0, column=4)
self.parent.grid_columnconfigure(0, weight=1)
self.parent.grid_columnconfigure(4, weight=1)
prevButton = Button(self.parent,
text="<--",
command=self.onPrev,
state=DISABLED
)
self.prevButton = prevButton
prevButton.grid(row=0, column=1)
puzzle_number = 1
self.puzzle_number = puzzle_number
puzzleNumber = Label(self.parent, text="Puzzle #{}".format(puzzle_number))
self.puzzleNumber = puzzleNumber
puzzleNumber.grid(row=0, column=2)
nextButton = Button(self.parent,
text="-->",
command=self.onNext,
state=DISABLED
)
self.nextButton = nextButton
nextButton.grid(row=0, column=3)
# Canvas
canvas = Canvas(self.parent)
self.canvas = canvas
for i in range(15):
for j in range(15):
fillcolor = "gray80"
self.canvas.create_rectangle(10+20*j, 10+20*i,
10+20*(j+1), 10+20*(i+1),
fill=fillcolor,
tags="{},{}".format(i, j)
)
self.canvas.bind("<ButtonPress-1>", self.onClick)
canvas.grid(row=1, columnspan=5, sticky=W+E+N+S)
self.parent.grid_rowconfigure(1, weight=1)
def onOpen(self):
        filepath = askopenfilename()
        if not filepath:  # user cancelled the file dialog
            return
        with open(filepath, "rb") as f:
self.rom = f.read()
self.puzzles = self.plugin.read_puzzles_from_rom(self.rom)
self.fileMenu.entryconfig(1, state=DISABLED)
self.fileMenu.entryconfig(2, state=NORMAL)
self.nextButton['state'] = NORMAL
self.prevButton['state'] = NORMAL
self.draw_puzzle()
def onSave(self):
        outfile = asksaveasfile(mode='wb')
        if outfile is None:  # user cancelled the file dialog
            return
        with outfile:
            outfile.write(self.rom)
            self.plugin.insert_puzzles(outfile, self.puzzles[1:])
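        # Note: self.puzzles is treated as 1-indexed (puzzle_number runs
        # 1..256), so element 0 is skipped when writing the puzzles back.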
def onExit(self):
self.quit()
def draw_puzzle(self):
for i in range(15):
for j in range(15):
fillcolor = (
"gray40" if self.puzzles[self.puzzle_number]['puzzle'][i][j]
else "gray80")
self.canvas.itemconfig("{},{}".format(i,j), fill=fillcolor)
def onPrev(self):
if self.puzzle_number == 1:
self.puzzle_number = 256
else:
self.puzzle_number -= 1
self.puzzleNumber['text'] = "Puzzle #{}".format(self.puzzle_number)
self.draw_puzzle()
def onNext(self):
if self.puzzle_number == 256:
self.puzzle_number = 1
else:
self.puzzle_number += 1
self.puzzleNumber['text'] = "Puzzle #{}".format(self.puzzle_number)
self.draw_puzzle()
def onClick(self, event):
if not self.puzzles:
return
num = event.widget.find_closest(event.x, event.y)[0]
        # Canvas item ids start at 1, so shift to 0-based before mapping
        # onto the 15x15 grid of rectangles created in initUI.
        i = (num - 1) // 15
        j = (num - 1) % 15
value = self.puzzles[self.puzzle_number]['puzzle'][i][j]
self.puzzles[self.puzzle_number]['puzzle'][i][j] = not value
fillcolor = (
"gray40" if self.puzzles[self.puzzle_number]['puzzle'][i][j]
else "gray80"
)
self.canvas.itemconfig(num, fill=fillcolor)
# set the puzzle size
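        # Dimensions snap to the smallest of 5x5 / 10x10 / 15x15 containing
        # every filled cell: a mark in rows/columns 10-14 forces 15x15;
        # otherwise a mark in rows/columns 5-9 forces 10x10.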
if (any([self.puzzles[self.puzzle_number]['puzzle'][i][j]
for i in range(15)
for j in range(10, 15)]) or
any([self.puzzles[self.puzzle_number]['puzzle'][i][j]
for i in range(10,15)
for j in range(15)])):
self.puzzles[self.puzzle_number]['width'] = 15
self.puzzles[self.puzzle_number]['height'] = 15
elif (any([self.puzzles[self.puzzle_number]['puzzle'][i][j]
for i in range(10)
for j in range(5, 10)]) or
any([self.puzzles[self.puzzle_number]['puzzle'][i][j]
for i in range(5,10)
for j in range(10)])):
self.puzzles[self.puzzle_number]['width'] = 10
self.puzzles[self.puzzle_number]['height'] = 10
else:
self.puzzles[self.puzzle_number]['width'] = 5
self.puzzles[self.puzzle_number]['height'] = 5
def main():
root = Tk()
root.geometry("320x350+300+300")
root.resizable(0,0)
app = MainWindow(root)
root.mainloop()
if __name__ == '__main__':
main()
| gpl-3.0 | 3,956,064,887,759,260,700 | 34.919786 | 82 | 0.540701 | false |
nandhp/youtube-dl | youtube_dl/extractor/naver.py | 11 | 3766 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
ExtractorError,
)
class NaverIE(InfoExtractor):
_VALID_URL = r'https?://(?:m\.)?tvcast\.naver\.com/v/(?P<id>\d+)'
_TESTS = [{
'url': 'http://tvcast.naver.com/v/81652',
'info_dict': {
'id': '81652',
'ext': 'mp4',
'title': '[9월 모의고사 해설강의][수학_김상희] 수학 A형 16~20번',
'description': '합격불변의 법칙 메가스터디 | 메가스터디 수학 김상희 선생님이 9월 모의고사 수학A형 16번에서 20번까지 해설강의를 공개합니다.',
'upload_date': '20130903',
},
}, {
'url': 'http://tvcast.naver.com/v/395837',
'md5': '638ed4c12012c458fefcddfd01f173cd',
'info_dict': {
'id': '395837',
'ext': 'mp4',
'title': '9년이 지나도 아픈 기억, 전효성의 아버지',
'description': 'md5:5bf200dcbf4b66eb1b350d1eb9c753f7',
'upload_date': '20150519',
},
'skip': 'Georestricted',
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
m_id = re.search(r'var rmcPlayer = new nhn.rmcnmv.RMCVideoPlayer\("(.+?)", "(.+?)"',
webpage)
if m_id is None:
error = self._html_search_regex(
r'(?s)<div class="(?:nation_error|nation_box|error_box)">\s*(?:<!--.*?-->)?\s*<p class="[^"]+">(?P<msg>.+?)</p>\s*</div>',
webpage, 'error', default=None)
if error:
raise ExtractorError(error, expected=True)
raise ExtractorError('couldn\'t extract vid and key')
vid = m_id.group(1)
key = m_id.group(2)
query = compat_urllib_parse_urlencode({'vid': vid, 'inKey': key, })
query_urls = compat_urllib_parse_urlencode({
'masterVid': vid,
'protocol': 'p2p',
'inKey': key,
})
info = self._download_xml(
'http://serviceapi.rmcnmv.naver.com/flash/videoInfo.nhn?' + query,
video_id, 'Downloading video info')
urls = self._download_xml(
'http://serviceapi.rmcnmv.naver.com/flash/playableEncodingOption.nhn?' + query_urls,
video_id, 'Downloading video formats info')
formats = []
for format_el in urls.findall('EncodingOptions/EncodingOption'):
domain = format_el.find('Domain').text
uri = format_el.find('uri').text
f = {
'url': compat_urlparse.urljoin(domain, uri),
'ext': 'mp4',
'width': int(format_el.find('width').text),
'height': int(format_el.find('height').text),
}
if domain.startswith('rtmp'):
# urlparse does not support custom schemes
# https://bugs.python.org/issue18828
f.update({
'url': domain + uri,
'ext': 'flv',
'rtmp_protocol': '1', # rtmpt
})
formats.append(f)
self._sort_formats(formats)
return {
'id': video_id,
'title': info.find('Subject').text,
'formats': formats,
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
'upload_date': info.find('WriteDate').text.replace('.', ''),
'view_count': int(info.find('PlayCount').text),
}
| unlicense | -8,815,060,308,708,217,000 | 35.714286 | 138 | 0.511117 | false |
evildmp/arkestra-clinical-studies | setup.py | 1 | 1220 | import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='arkestra-clinical-studies',
version='0.1',
packages=['arkestra_clinical_studies'],
include_package_data=True,
license='BSD License', # example license
    description='Manage and publish information about clinical trials in Arkestra',
long_description=README,
url='https://github.com/evildmp/arkestra-clinical-trials',
author='Daniele Procida',
author_email='[email protected]',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
# replace these appropriately if you are using Python 3
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| bsd-2-clause | 255,867,232,613,774,080 | 35.969697 | 78 | 0.64918 | false |
alphacsc/alphacsc | alphacsc/utils/tests/test_constants.py | 1 | 1832 | import pytest
import numpy as np
from alphacsc.utils import check_random_state, get_D
from alphacsc.utils.whitening import whitening, apply_whitening
from alphacsc.utils.compute_constants import compute_DtD, compute_ztz
from alphacsc.utils.convolution import tensordot_convolve, construct_X_multi
def test_DtD():
n_atoms = 10
n_channels = 5
n_times_atom = 50
random_state = 42
rng = check_random_state(random_state)
uv = rng.randn(n_atoms, n_channels + n_times_atom)
D = get_D(uv, n_channels)
assert np.allclose(compute_DtD(uv, n_channels=n_channels),
compute_DtD(D))
@pytest.mark.parametrize('use_whitening', [False, True])
def test_ztz(use_whitening):
n_atoms = 7
n_trials = 3
n_channels = 5
n_times_valid = 500
n_times_atom = 10
n_times = n_times_valid + n_times_atom - 1
random_state = None
rng = check_random_state(random_state)
X = rng.randn(n_trials, n_channels, n_times)
z = rng.randn(n_trials, n_atoms, n_times_valid)
D = rng.randn(n_atoms, n_channels, n_times_atom)
if use_whitening:
ar_model, X = whitening(X)
zw = apply_whitening(ar_model, z, mode="full")
ztz = compute_ztz(zw, n_times_atom)
grad = np.zeros(D.shape)
for t in range(n_times_atom):
grad[:, :, t] = np.tensordot(ztz[:, :, t:t + n_times_atom],
D[:, :, ::-1],
axes=([1, 2], [0, 2]))
else:
ztz = compute_ztz(z, n_times_atom)
grad = tensordot_convolve(ztz, D)
cost = np.dot(D.ravel(), grad.ravel())
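    # The assertion below checks the identity <D, grad> = <X_hat, X_hat>:
    # the gradient computed from the precomputed z-cross-correlations (ztz)
    # must match the one implied by explicitly reconstructing X_hat.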
X_hat = construct_X_multi(z, D)
if use_whitening:
X_hat = apply_whitening(ar_model, X_hat, mode="full")
assert np.isclose(cost, np.dot(X_hat.ravel(), X_hat.ravel()))
| bsd-3-clause | 8,862,984,482,780,215,000 | 29.533333 | 76 | 0.592795 | false |
flo-compbio/gopca | gopca/gopca.py | 1 | 28600 | # Copyright (c) 2015, 2016 Florian Wagner
#
# This file is part of GO-PCA.
#
# GO-PCA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License, Version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module containing the `GOPCA` class.
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
# import sys
# import os
import logging
# import re
# import cPickle as pickle
import time
import hashlib
import copy
import datetime
from collections import Iterable, OrderedDict
from pkg_resources import parse_version
import numpy as np
import sklearn
from sklearn.decomposition import PCA
from scipy.stats import pearsonr
from genometools.basic import GeneSetCollection
from genometools.expression import ExpProfile, ExpMatrix, ExpGene, ExpGenome
from genometools import enrichment
from genometools.enrichment import RankBasedGSEResult, \
GeneSetEnrichmentAnalysis
from genometools.ontology import GeneOntology
import gopca
from . import GOPCAParams, GOPCAConfig, \
GOPCASignature, GOPCASignatureMatrix, GOPCARun
from . import util
logger = logging.getLogger(__name__)
class GOPCA(object):
"""Class for performing GO-PCA.
This class implements the GO-PCA algorithm. (The GO enrichment testing
is implemented in the `enrichment.GeneSetEnrichmentAnalysis` class of
the `genometools` package). The input data consists of an expression
matrix (`genometools.expression.ExpMatrix`) and a list of GO-PCA
"configurations" (`GOPCAConfig`), i.e., pairs of parameter settings and
gene set collections.
Parameters
----------
matrix : `genometools.expression.ExpMatrix`
See :attr:`matrix` attribute.
configs : Iterable of `GOPCAConfig`
See :attr:`configs` attribute.
num_components : int, optional
See :attr:`num_components` attribute. [0]
pc_seed : int, optional
See :attr:`pc_seed` attribute. [0]
pc_num_permutations : int, optional
See :attr:`pc_num_permutations` attribute. [15]
pc_zscore_thresh : float, optional
See :attr:`pc_zscore_thresh` attribute. [2.0]
pc_max_components : int, optional
See :attr:`pc_max_components` attribute. [0]
verbose : bool, optional
See :attr:`verbose` attribute. [False]
Attributes
----------
matrix : `genometools.expression.ExpMatrix`
The expression matrix.
configs : list of `GOPCAConfig`
The list of GO-PCA configurations. Each configuration consists of
gene sets (represented by a `GOPCAGeneSets` instance) along with a set
of GO-PCA parameters (`GOPCAParams`) to use for testing those gene
sets.
num_components : int
The number of principal components to test. If set 0, the number is
determined automatically using a permutation-based algorithm.
pc_seed : int
The random number generator seed, used to generate the permutations
for automatically determining the number of principal components to
test.
pc_num_permutations : int
The number of permutations to used for automatically determining the
number of principal components to test.
pc_zscore_thresh : float
The z-score threshold used for automatically determining the number of
principal components (PC) to test. First, the fraction of variance
explained by the first PC in each permuted dataset is calculated.
Then, the mean and standard deviation of those values are used to
calculate a z-score for the fraction of variance explained by each PC
in the real dataset. All PCs with a z-score above the specified
threshold are tested.
pc_max_components : int
The maximum number of principal components (PCs) to test (only relevant
when the algorithm for automatically determining the number of PCs
to test is used. For testing a fixed number of PCs, set the
:attr:`num_components` attribute to a non-zero value.
verbose : bool
If set to ``True``, generate more verbose output.
"""
__param_defaults = OrderedDict([
('num_components', 0), # 0 = automatic
('pc_seed', 0),
('pc_num_permutations', 15),
('pc_zscore_thresh', 2.0),
('pc_max_components', 0), # 0 = no maximum
])
"""Global GO-PCA parameter default values."""
@staticmethod
def get_param_defaults():
return GOPCA.__param_defaults.copy()
def __init__(self, matrix, configs, **kwargs):
assert isinstance(matrix, ExpMatrix)
assert isinstance(configs, Iterable)
num_components = kwargs.pop(
'num_components', self.__param_defaults['num_components'])
pc_seed = kwargs.pop(
            'pc_seed', self.__param_defaults['pc_seed'])
pc_num_permutations = kwargs.pop(
'pc_num_permutations',
self.__param_defaults['pc_num_permutations'])
pc_zscore_thresh = kwargs.pop(
'pc_zscore_thresh', self.__param_defaults['pc_zscore_thresh'])
pc_max_components = kwargs.pop(
'pc_max_components', self.__param_defaults['pc_max_components'])
verbose = kwargs.pop('verbose', False)
assert isinstance(num_components, (int, np.integer))
assert isinstance(pc_seed, (int, np.integer))
assert isinstance(pc_num_permutations, (int, np.integer))
assert isinstance(pc_zscore_thresh, (float, np.float))
assert isinstance(pc_max_components, (int, np.integer))
assert isinstance(verbose, bool)
self.matrix = matrix
self.configs = list(configs)
self.num_components = int(num_components)
self.pc_seed = int(pc_seed)
self.pc_num_permutations = int(pc_num_permutations)
self.pc_zscore_thresh = float(pc_zscore_thresh)
self.pc_max_components = int(pc_max_components)
self.verbose = verbose
# make sure configs have the right type
for conf in self.configs:
assert isinstance(conf, GOPCAConfig)
@classmethod
def simple_setup(cls, matrix, params, gene_sets, gene_ontology=None,
**kwargs):
"""Initialize GO-PCA instance with only one collection of gene sets.
"""
# TODO: finish docstring
assert isinstance(matrix, ExpMatrix)
assert isinstance(params, GOPCAParams)
assert isinstance(gene_sets, GeneSetCollection)
if gene_ontology is not None:
assert isinstance(gene_ontology, GeneOntology)
configs = [GOPCAConfig(params, gene_sets, gene_ontology)]
return cls(matrix, configs, **kwargs)
def __repr__(self):
return '<%s instance (hash="%s")>' % \
(self.__class__.__name__, self.hash)
def __str__(self):
return '<%s instance (hash="%s")>' % \
(self.__class__.__name__, self.hash)
def __eq__(self, other):
if self is other:
return True
elif type(self) is type(other):
return self.__dict__ == other.__dict__
else:
return NotImplemented
def __ne__(self, other):
return not self.__eq__(other)
@property
def hash(self):
data_str = ';'.join(
repr(v) for v in [self.configs, self.matrix])
data = data_str.encode('UTF-8')
return str(hashlib.md5(data).hexdigest())
@property
def X(self):
return self.matrix.X
@staticmethod
def print_signatures(signatures, maxlength=50, debug=False):
"""Print a list of signatures, sorted by PC and enrichment score.
"""
        sig_sorted = sorted(
            signatures, key=lambda sig: [abs(sig.pc), -sig.pc, sig.escore])
for sig in sig_sorted:
sig_label = sig.get_label(max_name_length=maxlength,
include_pval=True)
if debug:
logger.debug(sig_label)
else:
logger.info(sig_label)
@staticmethod
def get_pc_explained_variance_threshold(X, z, t, seed):
        """Compute a permutation-based threshold for PC explained variance.

        Each of the `t` permutations shuffles every row of `X`
        independently; the fraction of variance explained by the first PC
        of the permuted data is recorded, and the returned threshold is
        ``mean + z * std`` over these null values.
        """
# RandomizedPCA does not work in Scikit-learn 0.14.1,
# but it works in Scikit-learn 0.16.1
        sklearn_pv = parse_version(sklearn.__version__)
use_old_pca = False
if sklearn_pv < parse_version('0.18.0'):
use_old_pca = True
if sklearn_pv >= parse_version('0.16.1'):
# old randomized PCA implementation
logger.debug('Using old scikit-learn randomized PCA implementation.')
from sklearn.decomposition import RandomizedPCA as PCA
else:
# no randomized PCA implementation available
logger.debug('No randomized PCA implementation available.')
from sklearn.decomposition import PCA
else:
# use new randomized PCA implementation
from sklearn.decomposition import PCA
# initialize random number generator
np.random.seed(seed)
# do permutations
p, n = X.shape
d_max_null = np.empty(t, dtype=np.float64)
X_perm = np.empty((p, n), dtype=np.float64)
if use_old_pca:
M_null = PCA(n_components=1)
else:
M_null = PCA(n_components=1, svd_solver='randomized')
for j in range(t):
for i in range(p):
X_perm[i, :] = X[i, np.random.permutation(n)]
M_null.fit(X_perm.T)
d_max_null[j] = M_null.explained_variance_ratio_[0]
# calculate z-score threshold
mean_null = np.mean(d_max_null)
std_null = np.std(d_max_null, ddof=1)
thresh = mean_null + z * std_null
return thresh
def estimate_num_components(self):
"""Estimate the number of non-trivial PCs using a permutation test.
"""
# TODO: finish docstring
logger.info('Estimating the number of principal components '
'(seed = %d)...', self.pc_seed)
logger.debug('(permutations = %d, z-score threshold = %.1f)...',
self.pc_num_permutations, self.pc_zscore_thresh)
# perform PCA
p, n = self.matrix.shape
d_max = min(p, n-1)
M_pca = PCA(n_components=d_max)
M_pca.fit(self.matrix.X.T)
d = M_pca.explained_variance_ratio_
logger.debug('Largest explained variance: %.2f', d[0])
thresh = self.get_pc_explained_variance_threshold(
self.X, self.pc_zscore_thresh, self.pc_num_permutations,
self.pc_seed)
logger.debug('Explained variance threshold: %.2f', thresh)
d_est = np.sum(d >= thresh)
logger.info('The estimated number of non-trivial PCs is %d.', d_est)
return d_est
@staticmethod
def _local_filter(params, gse_analysis, enriched, ranked_genes,
verbose=False):
"""Apply GO-PCA's "local" filter.
Returns the enriched gene sets that passed the filter.
"""
assert isinstance(params, GOPCAParams)
assert isinstance(gse_analysis, GeneSetEnrichmentAnalysis)
assert isinstance(enriched, Iterable)
assert isinstance(ranked_genes, Iterable)
assert isinstance(verbose, bool)
msg = logger.debug
if verbose:
msg = logger.info
if len(enriched) <= 1:
return enriched
# sort enriched gene sets by E-score (in descending order)
q = len(enriched)
a = sorted(range(q), key=lambda i: -enriched[i].escore)
todo = [enriched[i] for i in a]
# keep the most enriched gene set
most_enriched = todo[0]
kept = [most_enriched]
todo = todo[1:]
# exclude all genes contained in the most enriched gene set
genes_used = set(most_enriched.ind_genes)
new_ranked_genes = []
L = params.mHG_L
new_L = L
for i, g in enumerate(ranked_genes):
if g not in genes_used:
new_ranked_genes.append(g)
elif i < L: # gene was already used, adjust L if necessary
new_L -= 1
ranked_genes = new_ranked_genes
L = new_L
# start filtering
# suppress logging messages from the enrichment module
enr_logger = logging.getLogger(enrichment.__name__)
enr_logger.setLevel(logging.ERROR)
# initialize matrix for XL-mHG test
K_max = max([enr.K for enr in todo])
p = len(ranked_genes)
table = np.empty((K_max+1, p+1), dtype=np.longdouble)
while todo:
most_enriched = todo[0]
gs_id = most_enriched.gene_set.id
# test if GO term is still enriched after removing all previously
# used genes
enr = gse_analysis.get_rank_based_enrichment(
ranked_genes, params.pval_thresh,
params.mHG_X_frac, params.mHG_X_min, L,
adjust_pval_thresh=False,
escore_pval_thresh=params.escore_pval_thresh,
gene_set_ids=[gs_id], table=table,
exact_pval='if_necessary')
assert len(enr) in [0, 1]
# enr will be an empty list if GO term does not meet the p-value
# threshold
todo = todo[1:] # remove the current gene set from the to-do list
if not enr:
continue
elif params.escore_thresh is not None and \
enr[0].escore < params.escore_thresh:
continue
# enr = enr[0]
# print enr,'%d @ %d, s=%.1e' %(enr.k_n,enr.mHG_n,enr.stat)
# keep the gene set
kept.append(most_enriched)
# next, exclude selected genes from further analysis:
# 1) update set of used (excluded) genes 2) adjust L
genes_used.update(most_enriched.ind_genes)
new_ranked_genes = []
new_L = L
for i, g in enumerate(ranked_genes):
if g not in genes_used:
new_ranked_genes.append(g)
elif i < L: # gene was already used, adjust L if necessary
new_L -= 1
ranked_genes = new_ranked_genes
L = new_L
# stop suppressing log messages from the enrichment module
enr_logger.setLevel(logging.NOTSET)
return kept
@staticmethod
def _generate_signature(matrix, params, pc, gse_result,
standardize=False, verbose=False):
"""Generate a signature based on an enriched gene set.
"""
assert isinstance(matrix, ExpMatrix)
assert isinstance(params, GOPCAParams)
assert isinstance(pc, int)
assert isinstance(gse_result, RankBasedGSEResult)
assert isinstance(standardize, bool)
assert isinstance(verbose, bool)
# select genes above cutoff giving rise to XL-mHG test statistic
enr_genes = gse_result.genes_above_cutoff
# calculate average expression
enr_matrix = matrix.loc[enr_genes].copy()
if standardize:
enr_matrix.standardize_genes(inplace=True)
# use the average expression of all genes above the XL-mHG cutoff as
# a "seed"
seed = ExpProfile(enr_matrix.mean(axis=0))
# rank all genes by their correlation with the seed, and select only
# those with correlation ">=" params.sig_corr_thresh, but no fewer
# than params.min_sig_genes
# calculate seed based on the X genes most strongly correlated with
# the average
corr = np.float64([pearsonr(seed.values, x)[0] for x in enr_matrix.X])
a = np.argsort(corr)
a = a[::-1]
# determine the number of genes to include
num_genes = max(np.sum(corr >= params.sig_corr_thresh),
params.sig_min_genes)
sig_matrix = enr_matrix.iloc[a[:num_genes]].copy()
return GOPCASignature(pc, gse_result, seed, sig_matrix)
@staticmethod
def _generate_pc_signatures(matrix, params, gse_analysis, W, pc,
standardize=False, verbose=False):
"""Generate signatures for a specific principal component and ordering.
The absolute value of ``pc`` determines the principal component (PC).
Genes are then ranked by their loadings for this PC. Whether this
ranking is in ascending or descending order is determined by the sign
of ``pc``: If it has a positive sign, then the ranking will be in
descending order (most positive loading values first). If it has a
negative sign, then the ranking will be in ascending order (most
negative loading values first).
"""
assert isinstance(matrix, ExpMatrix)
assert isinstance(params, GOPCAParams)
assert isinstance(gse_analysis, GeneSetEnrichmentAnalysis)
assert isinstance(W, np.ndarray) and W.ndim == 2
assert isinstance(pc, int) and pc != 0
assert isinstance(standardize, bool)
assert isinstance(verbose, bool)
msg = logger.debug
if verbose:
msg = logger.info
# rank genes by their PC loadings
pc_index = abs(pc)-1
a = np.argsort(W[:, pc_index])
if pc > 0:
# for positive pc values, use descending order
a = a[::-1]
ranked_genes = [matrix.index[i] for i in a]
# - find enriched gene sets using the XL-mHG test
# - get_enriched_gene_sets() also calculates the enrichment score,
# but does not use it for filtering
# suppress logging messages from genometools.enrichment module
enr_logger = logging.getLogger(enrichment.__name__)
enr_logger.setLevel(logging.ERROR)
logger.debug('config: %f %d %d',
params.mHG_X_frac, params.mHG_X_min, params.mHG_L)
enriched = gse_analysis.get_rank_based_enrichment(
ranked_genes, params.pval_thresh,
params.mHG_X_frac, params.mHG_X_min, params.mHG_L,
adjust_pval_thresh=False,
escore_pval_thresh=params.escore_pval_thresh,
exact_pval='if_significant')
if not enriched:
# no gene sets were found to be enriched
return []
# stop suppressing logging messages from genometools.enrichment module
enr_logger.setLevel(logging.NOTSET)
# filter enriched GO terms by strength of enrichment
# (if threshold is provided)
if params.escore_thresh is not None:
q_before = len(enriched)
enriched = [enr for enr in enriched
if enr.escore >= params.escore_thresh]
q = len(enriched)
msg('Kept %d / %d enriched gene sets with E-score >= %.1f',
q, q_before, params.escore_thresh)
# apply local filter (if enabled)
if not params.no_local_filter:
q_before = len(enriched)
enriched = GOPCA._local_filter(params, gse_analysis,
enriched, ranked_genes)
q = len(enriched)
msg('Local filter: Kept %d / %d enriched gene sets.', q, q_before)
# generate signatures
signatures = []
q = len(enriched)
for j, enr in enumerate(enriched):
signatures.append(
GOPCA._generate_signature(
matrix, params, pc, enr,
standardize=standardize, verbose=verbose))
msg('Generated %d signatures based on the enriched gene sets.', q)
return signatures
@staticmethod
def _global_filter(config, new_signatures, previous_signatures,
ontology=None):
"""Apply GO-PCA's "global" filter.
"""
if len(previous_signatures) == 0:
return new_signatures
kept = []
previous_gene_sets = set([sig.gene_set.id for sig in
previous_signatures])
for sig in new_signatures:
gs_id = sig.gene_set.id
test_gene_sets = {gs_id, }
if ontology is not None:
term = ontology[gs_id] # get the GOTerm object
test_gene_sets |= (term.ancestors | term.descendants)
overlap = test_gene_sets & previous_gene_sets
if overlap:
logger.debug('Gene set "%s" filtered out.', sig.gene_set.name)
else:
kept.append(sig)
return kept
@staticmethod
def _get_config_dict(config):
return config.get_dict()
# end static functions
# public functions
def has_param(self, name):
return self.config.has_param(name)
def get_param(self, name):
return self.config.get_param(name)
def set_param(self, name, value):
"""Set a GO-PCA parameter.
Parameters
----------
name: str
The name of the parameter.
value: ?
The value of the parameter.
Returns
-------
None
"""
self.config.set_param(name, value)
def run(self):
"""Perform GO-PCA.
Parameters
----------
Returns
-------
`GOPCARun` or None
The GO-PCA run, or ``None`` if the run failed.
"""
t0 = time.time() # remember the start time
timestamp = str(datetime.datetime.utcnow()) # timestamp for the run
### Phase 1: Make sure all configurations are valid
all_configs_valid = True
for config in self.configs:
if not config.user_params.check_params():
# problems with the configuration
all_configs_valid = False
config.finalize_params(self.matrix.p)
if not config.params.check_params():
all_configs_valid = False
if not all_configs_valid:
logger.error('Invalid configuration settings. '
'Aborting GO-PCA run.')
return None
# print some information
p, n = self.matrix.shape
logger.info('Timestamp: %s', timestamp)
logger.info('Size of expression matrix: ' +
'p=%d genes x n=%d samples.', p, n)
# Report hash values for expression matrix and configurations
expression_hash = self.matrix.hash
logger.info('Expression matrix hash: %s', expression_hash)
config_hashes = []
for i, config in enumerate(self.configs):
config_hashes.append(config.hash)
logger.info('Configuration #%d hash: %s', i+1, config_hashes[-1])
### Phase 2: Determine the number of principal components
num_components = self.num_components
if num_components == 0:
# estimate the number of non-trivial PCs using a permutation test
num_components = self.estimate_num_components()
if num_components == 0:
logger.error('The estimated number of non-trivial '
'principal components is 0. '
'Aborting GO-PCA run.')
return None
if 0 < self.pc_max_components < num_components:
num_components = self.pc_max_components
logger.info('Limiting the number of PCs to test to %d.', num_components)
else:
# determine the total number of principal components
# (i.e., the number of dimensions spanned by the data)
max_components = min(self.matrix.p, self.matrix.n - 1)
if self.num_components > max_components:
logger.error('The number of PCs to test was specified as '
'%d, but the data spans only %d dimensions. '
'Aborting GO-PCA run.',
num_components, max_components)
return None
if num_components == 0:
                logger.error('No principal components to test. '
                             'Aborting GO-PCA run.')
return None
### Phase 3: Perform PCA
logger.info('Performing PCA...')
pca = PCA(n_components=num_components)
Y = pca.fit_transform(self.matrix.X.T)
# output fraction of variance explained for the PCs tested
frac = pca.explained_variance_ratio_
cum_frac = np.cumsum(frac)
logger.info('Fraction of total variance explained by the first '
'%d PCs: %.1f%%', num_components, 100 * cum_frac[-1])
### Phase 4: Run GO-PCA for each configuration supplied
enr_logger = logging.getLogger(enrichment.__name__)
genome = ExpGenome.from_gene_names(self.matrix.genes.tolist())
W = pca.components_.T # the loadings matrix
msg = logger.debug
if self.verbose:
# enable more verbose "INFO" messages
msg = logger.info
all_signatures = []
for k, config in enumerate(self.configs):
logger.info('Generating GO-PCA signatures for configuration '
'%d...', k+1)
# create GeneSetEnrichmentAnalysis object
enr_logger.setLevel(logging.ERROR)
gse_analysis = GeneSetEnrichmentAnalysis(genome, config.gene_sets)
enr_logger.setLevel(logging.NOTSET)
# generate signatures
final_signatures = []
var_expl = 0.0
for d in range(num_components):
var_expl += frac[d]
msg('')
msg('-'*70)
msg('PC %d explains %.1f%% of the variance.',
d+1, 100*frac[d])
msg('The new cumulative fraction of variance explained '
'is %.1f%%.', 100*var_expl)
signatures_dsc = self._generate_pc_signatures(
self.matrix, config.params, gse_analysis, W, d+1)
signatures_asc = self._generate_pc_signatures(
self.matrix, config.params, gse_analysis, W, -(d+1))
signatures = signatures_dsc + signatures_asc
msg('# signatures: %d', len(signatures))
# apply global filter (if enabled)
if not config.params.no_global_filter:
before = len(signatures)
signatures = self._global_filter(
config.params, signatures, final_signatures,
config.gene_ontology)
msg('Global filter: kept %d / %d signatures.',
len(signatures), before)
# self.print_signatures(signatures, debug=True)
final_signatures.extend(signatures)
msg('Total no. of signatures generated so far: %d',
len(final_signatures))
logger.info('')
logger.info('='*70)
logger.info('GO-PCA for configuration #%d generated %d '
'signatures.', k+1, len(final_signatures))
logger.info('-'*70)
self.print_signatures(final_signatures)
logger.info('='*70)
logger.info('')
all_signatures.extend(final_signatures)
### Phase 5: Generate signature matrix and return a `GOPCARun` instance
sig_matrix = GOPCASignatureMatrix.from_signatures(all_signatures)
t1 = time.time()
exec_time = t1 - t0
logger.info('This GO-PCA run took %.2f s.', exec_time)
gopca_run = GOPCARun(sig_matrix,
gopca.__version__, timestamp, exec_time,
expression_hash, config_hashes,
self.matrix.genes, self.matrix.samples, W, Y)
return gopca_run
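
# Usage sketch (illustrative, not part of the original module): given an
# ExpMatrix `matrix`, a GOPCAParams `params`, and a GeneSetCollection
# `gene_sets`, a minimal run looks like:
#
#     g = GOPCA.simple_setup(matrix, params, gene_sets)
#     run = g.run()               # a GOPCARun, or None on failure
#     if run is not None:
#         sig_matrix = run.sig_matrix   # attribute name assumed here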
| gpl-3.0 | -7,564,558,166,781,087,000 | 36.094682 | 88 | 0.584755 | false |
nebw/keras | tests/keras/layers/test_wrappers.py | 3 | 4618 | import pytest
import numpy as np
from numpy.testing import assert_allclose
from keras.utils.test_utils import keras_test
from keras.layers import wrappers, Input
from keras.layers import core, convolutional, recurrent
from keras.models import Sequential, Model, model_from_json
@keras_test
def test_TimeDistributed():
# first, test with Dense layer
model = Sequential()
model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
model.add(core.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 2)), nb_epoch=1, batch_size=10)
# test config
model.get_config()
# compare to TimeDistributedDense
test_input = np.random.random((1, 3, 4))
test_output = model.predict(test_input)
weights = model.layers[0].get_weights()
reference = Sequential()
reference.add(core.TimeDistributedDense(2, input_shape=(3, 4), weights=weights))
reference.add(core.Activation('relu'))
reference.compile(optimizer='rmsprop', loss='mse')
reference_output = reference.predict(test_input)
assert_allclose(test_output, reference_output, atol=1e-05)
# test when specifying a batch_input_shape
reference = Sequential()
reference.add(core.TimeDistributedDense(2, batch_input_shape=(1, 3, 4), weights=weights))
reference.add(core.Activation('relu'))
reference.compile(optimizer='rmsprop', loss='mse')
reference_output = reference.predict(test_input)
assert_allclose(test_output, reference_output, atol=1e-05)
# test with Convolution2D
model = Sequential()
model.add(wrappers.TimeDistributed(convolutional.Convolution2D(5, 2, 2, border_mode='same'), input_shape=(2, 3, 4, 4)))
model.add(core.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.random.random((1, 2, 3, 4, 4)), np.random.random((1, 2, 5, 4, 4)))
model = model_from_json(model.to_json())
model.summary()
# test stacked layers
model = Sequential()
model.add(wrappers.TimeDistributed(core.Dense(2), input_shape=(3, 4)))
model.add(wrappers.TimeDistributed(core.Dense(3)))
model.add(core.Activation('relu'))
model.compile(optimizer='rmsprop', loss='mse')
model.fit(np.random.random((10, 3, 4)), np.random.random((10, 3, 3)), nb_epoch=1, batch_size=10)
# test wrapping Sequential model
model = Sequential()
model.add(core.Dense(3, input_dim=2))
outer_model = Sequential()
outer_model.add(wrappers.TimeDistributed(model, input_shape=(3, 2)))
outer_model.compile(optimizer='rmsprop', loss='mse')
outer_model.fit(np.random.random((10, 3, 2)), np.random.random((10, 3, 3)), nb_epoch=1, batch_size=10)
# test with functional API
x = Input(shape=(3, 2))
y = wrappers.TimeDistributed(model)(x)
outer_model = Model(x, y)
outer_model.compile(optimizer='rmsprop', loss='mse')
outer_model.fit(np.random.random((10, 3, 2)), np.random.random((10, 3, 3)), nb_epoch=1, batch_size=10)
@keras_test
def test_Bidirectional():
rnn = recurrent.SimpleRNN
nb_sample = 2
dim = 2
timesteps = 2
output_dim = 2
for mode in ['sum', 'concat']:
x = np.random.random((nb_sample, timesteps, dim))
target_dim = 2 * output_dim if mode == 'concat' else output_dim
y = np.random.random((nb_sample, target_dim))
# test with Sequential model
model = Sequential()
model.add(wrappers.Bidirectional(rnn(output_dim),
merge_mode=mode, input_shape=(timesteps, dim)))
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, nb_epoch=1, batch_size=1)
# test config
model.get_config()
model = model_from_json(model.to_json())
model.summary()
# test stacked bidirectional layers
model = Sequential()
model.add(wrappers.Bidirectional(rnn(output_dim, return_sequences=True),
merge_mode=mode, input_shape=(timesteps, dim)))
model.add(wrappers.Bidirectional(rnn(output_dim), merge_mode=mode))
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, nb_epoch=1, batch_size=1)
# test with functional API
input = Input((timesteps, dim))
output = wrappers.Bidirectional(rnn(output_dim), merge_mode=mode)(input)
model = Model(input, output)
model.compile(loss='mse', optimizer='sgd')
model.fit(x, y, nb_epoch=1, batch_size=1)
if __name__ == '__main__':
pytest.main([__file__])
| mit | -8,808,156,948,719,307,000 | 37.483333 | 123 | 0.651581 | false |
viblo/pymunk | pymunk/space_debug_draw_options.py | 1 | 13059 | __docformat__ = "reStructuredText"
from typing import TYPE_CHECKING, List, NamedTuple, Optional, Sequence, Tuple, Type
if TYPE_CHECKING:
from .shapes import Shape
from types import TracebackType
import math
from ._chipmunk_cffi import ffi, lib
from .body import Body
from .vec2d import Vec2d
_DrawFlags = int
class SpaceDebugColor(NamedTuple):
"""Color tuple used by the debug drawing API."""
r: float
g: float
b: float
a: float
def as_int(self) -> Tuple[int, int, int, int]:
"""Return the color as a tuple of ints, where each value is rounded.
>>> SpaceDebugColor(0, 51.1, 101.9, 255).as_int()
(0, 51, 102, 255)
"""
return round(self[0]), round(self[1]), round(self[2]), round(self[3])
def as_float(self) -> Tuple[float, float, float, float]:
"""Return the color as a tuple of floats, each value divided by 255.
>>> SpaceDebugColor(0, 51, 102, 255).as_float()
(0.0, 0.2, 0.4, 1.0)
"""
return self[0] / 255.0, self[1] / 255.0, self[2] / 255.0, self[3] / 255.0
class SpaceDebugDrawOptions(object):
"""SpaceDebugDrawOptions configures debug drawing.
If appropriate its usually easy to use the supplied draw implementations
directly: pymunk.pygame_util, pymunk.pyglet_util and pymunk.matplotlib_util.
"""
DRAW_SHAPES = lib.CP_SPACE_DEBUG_DRAW_SHAPES
"""Draw shapes.
Use on the flags property to control if shapes should be drawn or not.
"""
DRAW_CONSTRAINTS = lib.CP_SPACE_DEBUG_DRAW_CONSTRAINTS
"""Draw constraints.
Use on the flags property to control if constraints should be drawn or not.
"""
DRAW_COLLISION_POINTS = lib.CP_SPACE_DEBUG_DRAW_COLLISION_POINTS
"""Draw collision points.
Use on the flags property to control if collision points should be drawn or
not.
"""
shape_dynamic_color = SpaceDebugColor(52, 152, 219, 255)
shape_static_color = SpaceDebugColor(149, 165, 166, 255)
shape_kinematic_color = SpaceDebugColor(39, 174, 96, 255)
shape_sleeping_color = SpaceDebugColor(114, 148, 168, 255)
def __init__(self) -> None:
_options = ffi.new("cpSpaceDebugDrawOptions *")
self._options = _options
self.shape_outline_color = SpaceDebugColor(44, 62, 80, 255)
self.constraint_color = SpaceDebugColor(142, 68, 173, 255)
self.collision_point_color = SpaceDebugColor(231, 76, 60, 255)
# Set to false to bypass chipmunk shape drawing code
self._use_chipmunk_debug_draw = True
@ffi.callback("cpSpaceDebugDrawCircleImpl")
def f1(pos, angle, radius, outline_color, fill_color, _): # type: ignore
self.draw_circle(
Vec2d(pos.x, pos.y),
angle,
radius,
self._c(outline_color),
self._c(fill_color),
)
_options.drawCircle = f1
@ffi.callback("cpSpaceDebugDrawSegmentImpl")
def f2(a, b, color, _): # type: ignore
# sometimes a and/or b can be nan. For example if both endpoints
# of a spring is at the same position. In those cases skip calling
# the drawing method.
if math.isnan(a.x) or math.isnan(a.y) or math.isnan(b.x) or math.isnan(b.y):
return
self.draw_segment(Vec2d(a.x, a.y), Vec2d(b.x, b.y), self._c(color))
_options.drawSegment = f2
@ffi.callback("cpSpaceDebugDrawFatSegmentImpl")
def f3(a, b, radius, outline_color, fill_color, _): # type: ignore
self.draw_fat_segment(
Vec2d(a.x, a.y),
Vec2d(b.x, b.y),
radius,
self._c(outline_color),
self._c(fill_color),
)
_options.drawFatSegment = f3
@ffi.callback("cpSpaceDebugDrawPolygonImpl")
def f4(count, verts, radius, outline_color, fill_color, _): # type: ignore
vs = []
for i in range(count):
vs.append(Vec2d(verts[i].x, verts[i].y))
self.draw_polygon(vs, radius, self._c(outline_color), self._c(fill_color))
_options.drawPolygon = f4
@ffi.callback("cpSpaceDebugDrawDotImpl")
def f5(size, pos, color, _): # type: ignore
self.draw_dot(size, Vec2d(pos.x, pos.y), self._c(color))
_options.drawDot = f5
@ffi.callback("cpSpaceDebugDrawColorForShapeImpl")
def f6(_shape, data): # type: ignore
space = ffi.from_handle(data)
shape = space._get_shape(_shape)
return self.color_for_shape(shape)
_options.colorForShape = f6
self.flags = (
SpaceDebugDrawOptions.DRAW_SHAPES
| SpaceDebugDrawOptions.DRAW_CONSTRAINTS
| SpaceDebugDrawOptions.DRAW_COLLISION_POINTS
)
self._callbacks = [f1, f2, f3, f4, f5, f6]
def _get_shape_outline_color(self) -> SpaceDebugColor:
return self._c(self._options.shapeOutlineColor)
def _set_shape_outline_color(self, c: SpaceDebugColor) -> None:
self._options.shapeOutlineColor = c
shape_outline_color = property(
_get_shape_outline_color,
_set_shape_outline_color,
doc="""The outline color of shapes.
Should be a tuple of 4 ints between 0 and 255 (r,g,b,a).
Example:
>>> import pymunk
>>> s = pymunk.Space()
>>> c = pymunk.Circle(s.static_body, 10)
>>> s.add(c)
>>> options = pymunk.SpaceDebugDrawOptions()
>>> s.debug_draw(options)
draw_circle (Vec2d(0.0, 0.0), 0.0, 10.0, SpaceDebugColor(r=44.0, g=62.0, b=80.0, a=255.0), SpaceDebugColor(r=149.0, g=165.0, b=166.0, a=255.0))
>>> options.shape_outline_color = (10,20,30,40)
>>> s.debug_draw(options)
draw_circle (Vec2d(0.0, 0.0), 0.0, 10.0, SpaceDebugColor(r=10.0, g=20.0, b=30.0, a=40.0), SpaceDebugColor(r=149.0, g=165.0, b=166.0, a=255.0))
""",
)
def _get_constraint_color(self) -> SpaceDebugColor:
return self._c(self._options.constraintColor)
def _set_constraint_color(self, c: SpaceDebugColor) -> None:
self._options.constraintColor = c
constraint_color = property(
_get_constraint_color,
_set_constraint_color,
doc="""The color of constraints.
Should be a tuple of 4 ints between 0 and 255 (r,g,b,a).
Example:
>>> import pymunk
>>> s = pymunk.Space()
>>> b = pymunk.Body(1, 10)
>>> j = pymunk.PivotJoint(s.static_body, b, (0,0))
>>> s.add(j)
>>> options = pymunk.SpaceDebugDrawOptions()
>>> s.debug_draw(options)
draw_dot (5.0, Vec2d(0.0, 0.0), SpaceDebugColor(r=142.0, g=68.0, b=173.0, a=255.0))
draw_dot (5.0, Vec2d(0.0, 0.0), SpaceDebugColor(r=142.0, g=68.0, b=173.0, a=255.0))
>>> options.constraint_color = (10,20,30,40)
>>> s.debug_draw(options)
draw_dot (5.0, Vec2d(0.0, 0.0), SpaceDebugColor(r=10.0, g=20.0, b=30.0, a=40.0))
draw_dot (5.0, Vec2d(0.0, 0.0), SpaceDebugColor(r=10.0, g=20.0, b=30.0, a=40.0))
""",
)
def _get_collision_point_color(self) -> SpaceDebugColor:
return self._c(self._options.collisionPointColor)
def _set_collision_point_color(self, c: SpaceDebugColor) -> None:
self._options.collisionPointColor = c
collision_point_color = property(
_get_collision_point_color,
_set_collision_point_color,
doc="""The color of collisions.
Should be a tuple of 4 ints between 0 and 255 (r,g,b,a).
Example:
>>> import pymunk
>>> s = pymunk.Space()
>>> b = pymunk.Body(1,10)
>>> c1 = pymunk.Circle(b, 10)
>>> c2 = pymunk.Circle(s.static_body, 10)
>>> s.add(b, c1, c2)
>>> s.step(1)
>>> options = pymunk.SpaceDebugDrawOptions()
>>> s.debug_draw(options)
draw_circle (Vec2d(0.0, 0.0), 0.0, 10.0, SpaceDebugColor(r=44.0, g=62.0, b=80.0, a=255.0), SpaceDebugColor(r=52.0, g=152.0, b=219.0, a=255.0))
draw_circle (Vec2d(0.0, 0.0), 0.0, 10.0, SpaceDebugColor(r=44.0, g=62.0, b=80.0, a=255.0), SpaceDebugColor(r=149.0, g=165.0, b=166.0, a=255.0))
draw_segment (Vec2d(8.0, 0.0), Vec2d(-8.0, 0.0), SpaceDebugColor(r=231.0, g=76.0, b=60.0, a=255.0))
>>> options.collision_point_color = (10,20,30,40)
>>> s.debug_draw(options)
draw_circle (Vec2d(0.0, 0.0), 0.0, 10.0, SpaceDebugColor(r=44.0, g=62.0, b=80.0, a=255.0), SpaceDebugColor(r=52.0, g=152.0, b=219.0, a=255.0))
draw_circle (Vec2d(0.0, 0.0), 0.0, 10.0, SpaceDebugColor(r=44.0, g=62.0, b=80.0, a=255.0), SpaceDebugColor(r=149.0, g=165.0, b=166.0, a=255.0))
draw_segment (Vec2d(8.0, 0.0), Vec2d(-8.0, 0.0), SpaceDebugColor(r=10.0, g=20.0, b=30.0, a=40.0))
""",
)
def __enter__(self) -> None:
pass
def __exit__(
self,
type: Optional[Type[BaseException]],
value: Optional[BaseException],
traceback: Optional["TracebackType"],
) -> None:
pass
def _c(self, color: ffi.CData) -> SpaceDebugColor:
return SpaceDebugColor(color.r, color.g, color.b, color.a)
def _get_flags(self) -> _DrawFlags:
return self._options.flags
def _set_flags(self, f: _DrawFlags) -> None:
self._options.flags = f
flags = property(
_get_flags,
_set_flags,
doc="""Bit flags which of shapes, joints and collisions should be drawn.
By default all 3 flags are set, meaning shapes, joints and collisions
will be drawn.
Example using the basic text only DebugDraw implementation (normally
you would the desired backend instead, such as
`pygame_util.DrawOptions` or `pyglet_util.DrawOptions`):
>>> import pymunk
>>> s = pymunk.Space()
>>> b = pymunk.Body()
>>> c = pymunk.Circle(b, 10)
>>> c.mass = 3
>>> s.add(b, c)
>>> s.add(pymunk.Circle(s.static_body, 3))
>>> s.step(0.01)
>>> options = pymunk.SpaceDebugDrawOptions()
>>> # Only draw the shapes, nothing else:
>>> options.flags = pymunk.SpaceDebugDrawOptions.DRAW_SHAPES
>>> s.debug_draw(options)
draw_circle (Vec2d(0.0, 0.0), 0.0, 10.0, SpaceDebugColor(r=44.0, g=62.0, b=80.0, a=255.0), SpaceDebugColor(r=52.0, g=152.0, b=219.0, a=255.0))
draw_circle (Vec2d(0.0, 0.0), 0.0, 3.0, SpaceDebugColor(r=44.0, g=62.0, b=80.0, a=255.0), SpaceDebugColor(r=149.0, g=165.0, b=166.0, a=255.0))
>>> # Draw the shapes and collision points:
>>> options.flags = pymunk.SpaceDebugDrawOptions.DRAW_SHAPES
>>> options.flags |= pymunk.SpaceDebugDrawOptions.DRAW_COLLISION_POINTS
>>> s.debug_draw(options)
draw_circle (Vec2d(0.0, 0.0), 0.0, 10.0, SpaceDebugColor(r=44.0, g=62.0, b=80.0, a=255.0), SpaceDebugColor(r=52.0, g=152.0, b=219.0, a=255.0))
draw_circle (Vec2d(0.0, 0.0), 0.0, 3.0, SpaceDebugColor(r=44.0, g=62.0, b=80.0, a=255.0), SpaceDebugColor(r=149.0, g=165.0, b=166.0, a=255.0))
draw_segment (Vec2d(1.0, 0.0), Vec2d(-8.0, 0.0), SpaceDebugColor(r=231.0, g=76.0, b=60.0, a=255.0))
""",
)
def draw_circle(
self,
pos: Vec2d,
angle: float,
radius: float,
outline_color: SpaceDebugColor,
fill_color: SpaceDebugColor,
) -> None:
print("draw_circle", (pos, angle, radius, outline_color, fill_color))
def draw_segment(self, a: Vec2d, b: Vec2d, color: SpaceDebugColor) -> None:
print("draw_segment", (a, b, color))
def draw_fat_segment(
self,
a: Vec2d,
b: Vec2d,
radius: float,
outline_color: SpaceDebugColor,
fill_color: SpaceDebugColor,
) -> None:
print("draw_fat_segment", (a, b, radius, outline_color, fill_color))
def draw_polygon(
self,
verts: Sequence[Vec2d],
radius: float,
outline_color: SpaceDebugColor,
fill_color: SpaceDebugColor,
) -> None:
print("draw_polygon", (verts, radius, outline_color, fill_color))
def draw_dot(self, size: float, pos: Vec2d, color: SpaceDebugColor) -> None:
print("draw_dot", (size, pos, color))
def draw_shape(self, shape: "Shape") -> None:
print("draw_shape", shape)
def color_for_shape(self, shape: "Shape") -> SpaceDebugColor:
if hasattr(shape, "color"):
return SpaceDebugColor(*shape.color) # type: ignore
color = self.shape_dynamic_color
        if shape.body is not None:
if shape.body.body_type == Body.STATIC:
color = self.shape_static_color
elif shape.body.body_type == Body.KINEMATIC:
color = self.shape_kinematic_color
elif shape.body.is_sleeping:
color = self.shape_sleeping_color
return color
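
# Usage sketch (illustrative): real drawing backends subclass
# SpaceDebugDrawOptions and override the draw_* hooks. A minimal example
# that collects primitives instead of printing them:
#
#     class CollectingOptions(SpaceDebugDrawOptions):
#         def __init__(self):
#             super().__init__()
#             self.calls = []
#
#         def draw_dot(self, size, pos, color):
#             self.calls.append(("dot", size, pos, color))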
| mit | -2,421,067,693,768,553,000 | 35.682584 | 151 | 0.581974 | false |
lucasmaystre/choix | choix/convergence.py | 1 | 2338 | import abc
import numpy as np
class ConvergenceTest(metaclass=abc.ABCMeta):
"""Abstract base class for convergence tests.
Convergence tests should implement a single function, `__call__`, which
takes a parameter vector and returns a boolean indicating whether or not
the convergence criterion is met.
"""
@abc.abstractmethod
def __call__(self, params, update=True):
"""Test whether convergence criterion is met.
The parameter `update` controls whether `params` should replace the
previous parameters (i.e., modify the state of the object).
"""
class NormOfDifferenceTest(ConvergenceTest):
"""Convergence test based on the norm of the difference vector.
This convergence test computes the difference between two successive
parameter vectors, and declares convergence when the norm of this
difference vector (normalized by the number of items) is below `tol`.
"""
def __init__(self, tol=1e-8, order=1):
self._tol = tol
self._ord = order
self._prev_params = None
def __call__(self, params, update=True):
params = np.asarray(params) - np.mean(params)
if self._prev_params is None:
if update:
self._prev_params = params
return False
dist = np.linalg.norm(self._prev_params - params, ord=self._ord)
if update:
self._prev_params = params
return dist <= self._tol * len(params)
class ScalarFunctionTest(ConvergenceTest):
"""Convergence test based on a scalar function of the parameters.
This convergence test computes the values of a scalar function of the
parameters, and declares convergence when the absolute difference between
two successive values is below `tol`.
A typical use case of this class is in conjunction with a log-likelihood
function.
"""
def __init__(self, fun, tol=1e-8):
self._fun = fun
self._tol = tol
self._prev_val = None
def __call__(self, params, update=True):
val = self._fun(params)
if self._prev_val is None:
if update:
self._prev_val = val
return False
dist = abs(val - self._prev_val)
if update:
self._prev_val = val
return dist < self._tol
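
# Usage sketch (illustrative): both tests share the same calling pattern.
#
#     test = NormOfDifferenceTest(tol=1e-8, order=1)
#     while not test(params):
#         params = iterate(params)   # `iterate` is a hypothetical update step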
| mit | 8,561,715,781,193,549,000 | 30.173333 | 77 | 0.634731 | false |
guanxi55nba/db-improvement | pylib/cqlshlib/cql3handling.py | 1 | 44570 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .cqlhandling import CqlParsingRuleSet, Hint
from cassandra.metadata import maybe_escape_name
from cassandra.metadata import escape_name
simple_cql_types = set(('ascii', 'bigint', 'blob', 'boolean', 'counter', 'decimal', 'double', 'float', 'inet', 'int',
'text', 'timestamp', 'timeuuid', 'uuid', 'varchar', 'varint'))
simple_cql_types.difference_update(('set', 'map', 'list'))
from . import helptopics
cqldocs = helptopics.CQL3HelpTopics()
class UnexpectedTableStructure(UserWarning):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return 'Unexpected table structure; may not translate correctly to CQL. ' + self.msg
SYSTEM_KEYSPACES = ('system', 'system_traces', 'system_auth')
NONALTERBALE_KEYSPACES = ('system',)
class Cql3ParsingRuleSet(CqlParsingRuleSet):
keywords = set((
'select', 'from', 'where', 'and', 'key', 'insert', 'update', 'with',
'limit', 'using', 'use', 'count', 'set',
'begin', 'apply', 'batch', 'truncate', 'delete', 'in', 'create',
'keyspace', 'schema', 'columnfamily', 'table', 'index', 'on', 'drop',
'primary', 'into', 'values', 'timestamp', 'ttl', 'alter', 'add', 'type',
'compact', 'storage', 'order', 'by', 'asc', 'desc', 'clustering',
'token', 'writetime', 'map', 'list', 'to', 'custom', 'if', 'not'
))
unreserved_keywords = set((
'key', 'clustering', 'ttl', 'compact', 'storage', 'type', 'values', 'custom', 'exists'
))
columnfamily_layout_options = (
('bloom_filter_fp_chance', None),
('comment', None),
('dclocal_read_repair_chance', 'local_read_repair_chance'),
('gc_grace_seconds', None),
('min_index_interval', None),
('max_index_interval', None),
('read_repair_chance', None),
('default_time_to_live', None),
('speculative_retry', None),
('memtable_flush_period_in_ms', None),
)
columnfamily_layout_map_options = (
# (CQL3 option name, schema_columnfamilies column name (or None if same),
# list of known map keys)
('compaction', 'compaction_strategy_options',
('class', 'max_threshold', 'tombstone_compaction_interval', 'tombstone_threshold', 'enabled', 'unchecked_tombstone_compaction')),
('compression', 'compression_parameters',
('sstable_compression', 'chunk_length_kb', 'crc_check_chance')),
('caching', None,
('rows_per_partition', 'keys')),
)
obsolete_cf_options = ()
consistency_levels = (
'ANY',
'ONE',
'TWO',
'THREE',
'QUORUM',
'ALL',
'LOCAL_QUORUM',
'EACH_QUORUM',
'SERIAL'
)
maybe_escape_name = staticmethod(maybe_escape_name)
escape_name = staticmethod(escape_name)
@classmethod
def escape_value(cls, value):
if value is None:
return 'NULL' # this totally won't work
if isinstance(value, bool):
value = str(value).lower()
elif isinstance(value, float):
return '%f' % value
elif isinstance(value, int):
return str(value)
return "'%s'" % value.replace("'", "''")
@staticmethod
def dequote_name(name):
name = name.strip()
if name == '':
return name
if name[0] == '"' and name[-1] == '"':
return name[1:-1].replace('""', '"')
else:
return name.lower()
@staticmethod
def dequote_value(cqlword):
cqlword = cqlword.strip()
if cqlword == '':
return cqlword
if cqlword[0] == "'" and cqlword[-1] == "'":
cqlword = cqlword[1:-1].replace("''", "'")
return cqlword
CqlRuleSet = Cql3ParsingRuleSet()
# convenience for remainder of module
completer_for = CqlRuleSet.completer_for
explain_completion = CqlRuleSet.explain_completion
dequote_value = CqlRuleSet.dequote_value
dequote_name = CqlRuleSet.dequote_name
escape_value = CqlRuleSet.escape_value
maybe_escape_name = CqlRuleSet.maybe_escape_name
# BEGIN SYNTAX/COMPLETION RULE DEFINITIONS
syntax_rules = r'''
<Start> ::= <CQL_Statement>*
;
<CQL_Statement> ::= [statements]=<statementBody> ";"
;
# the order of these terminal productions is significant:
<endline> ::= /\n/ ;
JUNK ::= /([ \t\r\f\v]+|(--|[/][/])[^\n\r]*([\n\r]|$)|[/][*].*?[*][/])/ ;
<stringLiteral> ::= /'([^']|'')*'/ ;
<quotedName> ::= /"([^"]|"")*"/ ;
<float> ::= /-?[0-9]+\.[0-9]+/ ;
<uuid> ::= /[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/ ;
<blobLiteral> ::= /0x[0-9a-f]+/ ;
<wholenumber> ::= /[0-9]+/ ;
<identifier> ::= /[a-z][a-z0-9_]*/ ;
<colon> ::= ":" ;
<star> ::= "*" ;
<endtoken> ::= ";" ;
<op> ::= /[-+=,().]/ ;
<cmp> ::= /[<>]=?/ ;
<brackets> ::= /[][{}]/ ;
<integer> ::= "-"? <wholenumber> ;
<boolean> ::= "true"
| "false"
;
<unclosedString> ::= /'([^']|'')*/ ;
<unclosedName> ::= /"([^"]|"")*/ ;
<unclosedComment> ::= /[/][*].*$/ ;
<term> ::= <stringLiteral>
| <integer>
| <float>
| <uuid>
| <boolean>
| <blobLiteral>
| <collectionLiteral>
| <functionName> <functionArguments>
| "NULL"
;
<functionArguments> ::= "(" ( <term> ( "," <term> )* )? ")"
;
<tokenDefinition> ::= token="TOKEN" "(" <term> ( "," <term> )* ")"
| <term>
;
<cident> ::= <quotedName>
| <identifier>
| <unreservedKeyword>
;
<colname> ::= <cident> ; # just an alias
<collectionLiteral> ::= <listLiteral>
| <setLiteral>
| <mapLiteral>
;
<listLiteral> ::= "[" ( <term> ( "," <term> )* )? "]"
;
<setLiteral> ::= "{" ( <term> ( "," <term> )* )? "}"
;
<mapLiteral> ::= "{" <term> ":" <term> ( "," <term> ":" <term> )* "}"
;
<functionName> ::= <identifier> | "TOKEN"
;
<statementBody> ::= <useStatement>
| <selectStatement>
| <dataChangeStatement>
| <schemaChangeStatement>
| <authenticationStatement>
| <authorizationStatement>
;
<dataChangeStatement> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
| <truncateStatement>
| <batchStatement>
;
<schemaChangeStatement> ::= <createKeyspaceStatement>
| <createColumnFamilyStatement>
| <createIndexStatement>
| <createUserTypeStatement>
| <createTriggerStatement>
| <dropKeyspaceStatement>
| <dropColumnFamilyStatement>
| <dropIndexStatement>
| <dropUserTypeStatement>
| <dropTriggerStatement>
| <alterTableStatement>
| <alterKeyspaceStatement>
| <alterUserTypeStatement>
;
<authenticationStatement> ::= <createUserStatement>
| <alterUserStatement>
| <dropUserStatement>
| <listUsersStatement>
;
<authorizationStatement> ::= <grantStatement>
| <revokeStatement>
| <listPermissionsStatement>
;
# timestamp is included here, since it's also a keyword
<simpleStorageType> ::= typename=( <identifier> | <stringLiteral> | <K_TIMESTAMP> ) ;
<userType> ::= utname=<cfOrKsName> ;
<storageType> ::= <simpleStorageType> | <collectionType> | <frozenCollectionType> | <userType> ;
# Note: autocomplete for frozen collection types does not handle nesting past depth 1 properly,
# but that's a lot of work to fix for little benefit.
<collectionType> ::= "map" "<" <simpleStorageType> "," ( <simpleStorageType> | <userType> ) ">"
| "list" "<" ( <simpleStorageType> | <userType> ) ">"
| "set" "<" ( <simpleStorageType> | <userType> ) ">"
;
<frozenCollectionType> ::= "frozen" "<" "map" "<" <storageType> "," <storageType> ">" ">"
| "frozen" "<" "list" "<" <storageType> ">" ">"
| "frozen" "<" "set" "<" <storageType> ">" ">"
;
<columnFamilyName> ::= ( ksname=<cfOrKsName> dot="." )? cfname=<cfOrKsName> ;
<userTypeName> ::= ( ksname=<cfOrKsName> dot="." )? utname=<cfOrKsName> ;
<keyspaceName> ::= ksname=<cfOrKsName> ;
<nonSystemKeyspaceName> ::= ksname=<cfOrKsName> ;
<alterableKeyspaceName> ::= ksname=<cfOrKsName> ;
<cfOrKsName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<unreservedKeyword> ::= nocomplete=
( <K_KEY>
| <K_CLUSTERING>
# | <K_COUNT> -- to get count(*) completion, treat count as reserved
| <K_TTL>
| <K_COMPACT>
| <K_STORAGE>
| <K_TYPE>
| <K_VALUES> )
;
<property> ::= [propname]=<cident> propeq="=" [propval]=<propertyValue>
;
<propertyValue> ::= propsimpleval=( <stringLiteral>
| <identifier>
| <integer>
| <float>
| <unreservedKeyword> )
# we don't use <mapLiteral> here so we can get more targeted
# completions:
| propsimpleval="{" [propmapkey]=<term> ":" [propmapval]=<term>
( ender="," [propmapkey]=<term> ":" [propmapval]=<term> )*
ender="}"
;
'''
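# Illustrative example of the <property> productions above: a clause such as
#     compaction = {'class': 'SizeTieredCompactionStrategy'}
# binds propname='compaction', one propmapkey/propmapval pair, and ender='}'.
# (The exact binding shapes are assumptions based on the rule annotations,
# not confirmed against the parser in cqlhandling.)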
def prop_equals_completer(ctxt, cass):
if not working_on_keyspace(ctxt):
# we know if the thing in the property name position is "compact" or
# "clustering" that there won't actually be an equals sign, because
# there are no properties by those names. there are, on the other hand,
# table properties that start with those keywords which don't have
# equals signs at all.
curprop = ctxt.get_binding('propname')[-1].upper()
if curprop in ('COMPACT', 'CLUSTERING'):
return ()
return ['=']
completer_for('property', 'propeq')(prop_equals_completer)
@completer_for('property', 'propname')
def prop_name_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_prop_name_completer(ctxt, cass)
else:
return cf_prop_name_completer(ctxt, cass)
@completer_for('propertyValue', 'propsimpleval')
def prop_val_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_prop_val_completer(ctxt, cass)
else:
return cf_prop_val_completer(ctxt, cass)
@completer_for('propertyValue', 'propmapkey')
def prop_val_mapkey_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_prop_val_mapkey_completer(ctxt, cass)
else:
return cf_prop_val_mapkey_completer(ctxt, cass)
@completer_for('propertyValue', 'propmapval')
def prop_val_mapval_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_prop_val_mapval_completer(ctxt, cass)
else:
return cf_prop_val_mapval_completer(ctxt, cass)
@completer_for('propertyValue', 'ender')
def prop_val_mapender_completer(ctxt, cass):
if working_on_keyspace(ctxt):
return ks_prop_val_mapender_completer(ctxt, cass)
else:
return cf_prop_val_mapender_completer(ctxt, cass)
def ks_prop_name_completer(ctxt, cass):
optsseen = ctxt.get_binding('propname', ())
if 'replication' not in optsseen:
return ['replication']
return ["durable_writes"]
def ks_prop_val_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
if optname == 'durable_writes':
return ["'true'", "'false'"]
if optname == 'replication':
return ["{'class': '"]
return ()
def ks_prop_val_mapkey_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
if optname != 'replication':
return ()
keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
for k, v in zip(keysseen, valsseen):
if k == 'class':
repclass = v
break
else:
return ["'class'"]
    if repclass in CqlRuleSet.replication_factor_strategies:
        opts = set(('replication_factor',))
    elif repclass == 'NetworkTopologyStrategy':
        return [Hint('<dc_name>')]
    else:
        # unknown replication class: no suboptions to suggest
        return ()
    return map(escape_value, opts.difference(keysseen))
def ks_prop_val_mapval_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
if optname != 'replication':
return ()
currentkey = dequote_value(ctxt.get_binding('propmapkey')[-1])
if currentkey == 'class':
return map(escape_value, CqlRuleSet.replication_strategies)
return [Hint('<term>')]
def ks_prop_val_mapender_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
if optname != 'replication':
return [',']
keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
for k, v in zip(keysseen, valsseen):
if k == 'class':
repclass = v
break
else:
return [',']
if repclass in CqlRuleSet.replication_factor_strategies:
if 'replication_factor' not in keysseen:
return [',']
if repclass == 'NetworkTopologyStrategy' and len(keysseen) == 1:
return [',']
return ['}']
def cf_prop_name_completer(ctxt, cass):
return [c[0] for c in (CqlRuleSet.columnfamily_layout_options +
CqlRuleSet.columnfamily_layout_map_options)]
def cf_prop_val_completer(ctxt, cass):
exist_opts = ctxt.get_binding('propname')
this_opt = exist_opts[-1]
if this_opt == 'compression':
return ["{'sstable_compression': '"]
if this_opt == 'compaction':
return ["{'class': '"]
if this_opt == 'caching':
return ["{'keys': '"]
if any(this_opt == opt[0] for opt in CqlRuleSet.obsolete_cf_options):
return ["'<obsolete_option>'"]
if this_opt in ('read_repair_chance', 'bloom_filter_fp_chance',
'dclocal_read_repair_chance'):
return [Hint('<float_between_0_and_1>')]
if this_opt in ('min_compaction_threshold', 'max_compaction_threshold',
'gc_grace_seconds', 'min_index_interval', 'max_index_interval'):
return [Hint('<integer>')]
return [Hint('<option_value>')]
def cf_prop_val_mapkey_completer(ctxt, cass):
optname = ctxt.get_binding('propname')[-1]
for cql3option, _, subopts in CqlRuleSet.columnfamily_layout_map_options:
if optname == cql3option:
break
else:
return ()
keysseen = map(dequote_value, ctxt.get_binding('propmapkey', ()))
valsseen = map(dequote_value, ctxt.get_binding('propmapval', ()))
pairsseen = dict(zip(keysseen, valsseen))
if optname == 'compression':
return map(escape_value, set(subopts).difference(keysseen))
if optname == 'caching':
return map(escape_value, set(subopts).difference(keysseen))
if optname == 'compaction':
opts = set(subopts)
try:
csc = pairsseen['class']
except KeyError:
return ["'class'"]
csc = csc.split('.')[-1]
if csc == 'SizeTieredCompactionStrategy':
opts.add('min_sstable_size')
opts.add('min_threshold')
opts.add('bucket_high')
opts.add('bucket_low')
opts.add('cold_reads_to_omit')
elif csc == 'LeveledCompactionStrategy':
opts.add('sstable_size_in_mb')
elif csc == 'DateTieredCompactionStrategy':
opts.add('base_time_seconds')
opts.add('max_sstable_age_days')
opts.add('timestamp_resolution')
return map(escape_value, opts)
return ()
def cf_prop_val_mapval_completer(ctxt, cass):
opt = ctxt.get_binding('propname')[-1]
key = dequote_value(ctxt.get_binding('propmapkey')[-1])
if opt == 'compaction':
if key == 'class':
return map(escape_value, CqlRuleSet.available_compaction_classes)
return [Hint('<option_value>')]
elif opt == 'compression':
if key == 'sstable_compression':
return map(escape_value, CqlRuleSet.available_compression_classes)
return [Hint('<option_value>')]
elif opt == 'caching':
if key == 'rows_per_partition':
return ["'ALL'", "'NONE'", Hint('#rows_per_partition')]
elif key == 'keys':
return ["'ALL'", "'NONE'"]
return ()
def cf_prop_val_mapender_completer(ctxt, cass):
return [',', '}']
@completer_for('tokenDefinition', 'token')
def token_word_completer(ctxt, cass):
return ['token(']
@completer_for('simpleStorageType', 'typename')
def storagetype_completer(ctxt, cass):
return simple_cql_types
@completer_for('keyspaceName', 'ksname')
def ks_name_completer(ctxt, cass):
return map(maybe_escape_name, cass.get_keyspace_names())
@completer_for('nonSystemKeyspaceName', 'ksname')
def nonsystem_ks_name_completer(ctxt, cass):
ksnames = [n for n in cass.get_keyspace_names() if n not in SYSTEM_KEYSPACES]
return map(maybe_escape_name, ksnames)
@completer_for('alterableKeyspaceName', 'ksname')
def alterable_ks_name_completer(ctxt, cass):
    ksnames = [n for n in cass.get_keyspace_names() if n not in NONALTERABLE_KEYSPACES]
return map(maybe_escape_name, ksnames)
def cf_ks_name_completer(ctxt, cass):
return [maybe_escape_name(ks) + '.' for ks in cass.get_keyspace_names()]
completer_for('columnFamilyName', 'ksname')(cf_ks_name_completer)
def cf_ks_dot_completer(ctxt, cass):
name = dequote_name(ctxt.get_binding('ksname'))
if name in cass.get_keyspace_names():
return ['.']
return []
completer_for('columnFamilyName', 'dot')(cf_ks_dot_completer)
@completer_for('columnFamilyName', 'cfname')
def cf_name_completer(ctxt, cass):
ks = ctxt.get_binding('ksname', None)
if ks is not None:
ks = dequote_name(ks)
try:
cfnames = cass.get_columnfamily_names(ks)
except Exception:
if ks is None:
return ()
raise
return map(maybe_escape_name, cfnames)
completer_for('userTypeName', 'ksname')(cf_ks_name_completer)
completer_for('userTypeName', 'dot')(cf_ks_dot_completer)
def ut_name_completer(ctxt, cass):
ks = ctxt.get_binding('ksname', None)
if ks is not None:
ks = dequote_name(ks)
try:
utnames = cass.get_usertype_names(ks)
except Exception:
if ks is None:
return ()
raise
return map(maybe_escape_name, utnames)
completer_for('userTypeName', 'utname')(ut_name_completer)
completer_for('userType', 'utname')(ut_name_completer)
@completer_for('unreservedKeyword', 'nocomplete')
def unreserved_keyword_completer(ctxt, cass):
# we never want to provide completions through this production;
# this is always just to allow use of some keywords as column
# names, CF names, property values, etc.
return ()
def get_table_meta(ctxt, cass):
ks = ctxt.get_binding('ksname', None)
if ks is not None:
ks = dequote_name(ks)
cf = dequote_name(ctxt.get_binding('cfname'))
return cass.get_table_meta(ks, cf)
def get_ut_layout(ctxt, cass):
ks = ctxt.get_binding('ksname', None)
if ks is not None:
ks = dequote_name(ks)
ut = dequote_name(ctxt.get_binding('utname'))
return cass.get_usertype_layout(ks, ut)
def working_on_keyspace(ctxt):
wat = ctxt.get_binding('wat').upper()
if wat in ('KEYSPACE', 'SCHEMA'):
return True
return False
syntax_rules += r'''
<useStatement> ::= "USE" <keyspaceName>
;
<selectStatement> ::= "SELECT" <selectClause>
"FROM" cf=<columnFamilyName>
( "WHERE" <whereClause> )?
( "ORDER" "BY" <orderByClause> ( "," <orderByClause> )* )?
( "LIMIT" limit=<wholenumber> )?
( "ALLOW" "FILTERING" )?
;
<whereClause> ::= <relation> ( "AND" <relation> )*
;
<relation> ::= [rel_lhs]=<cident> ( "=" | "<" | ">" | "<=" | ">=" | "CONTAINS" ( "KEY" )? ) <term>
| token="TOKEN" "(" [rel_tokname]=<cident>
( "," [rel_tokname]=<cident> )*
")" ("=" | "<" | ">" | "<=" | ">=") <tokenDefinition>
| [rel_lhs]=<cident> "IN" "(" <term> ( "," <term> )* ")"
;
<selectClause> ::= "DISTINCT"? <selector> ("AS" <cident>)? ("," <selector> ("AS" <cident>)?)*
| "*"
| "COUNT" "(" star=( "*" | "1" ) ")" ("AS" <cident>)?
;
<udtSubfieldSelection> ::= <identifier> "." <identifier>
;
<selector> ::= [colname]=<cident>
| <udtSubfieldSelection>
| "WRITETIME" "(" [colname]=<cident> ")"
| "TTL" "(" [colname]=<cident> ")"
| <functionName> <selectionFunctionArguments>
;
<selectionFunctionArguments> ::= "(" ( <selector> ( "," <selector> )* )? ")"
;
<orderByClause> ::= [ordercol]=<cident> ( "ASC" | "DESC" )?
;
'''
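# Illustrative statements matched by the <selectStatement> rules above
# (keyspace/table/column names are made up):
#   SELECT * FROM ks.users WHERE userid = 42 LIMIT 10;
#   SELECT COUNT(*) FROM users WHERE TOKEN(userid) > TOKEN(42) ALLOW FILTERING;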
@completer_for('orderByClause', 'ordercol')
def select_order_column_completer(ctxt, cass):
prev_order_cols = ctxt.get_binding('ordercol', ())
keyname = ctxt.get_binding('keyname')
if keyname is None:
keyname = ctxt.get_binding('rel_lhs', ())
if not keyname:
return [Hint("Can't ORDER BY here: need to specify partition key in WHERE clause")]
layout = get_table_meta(ctxt, cass)
order_by_candidates = [col.name for col in layout.clustering_key]
if len(order_by_candidates) > len(prev_order_cols):
return [maybe_escape_name(order_by_candidates[len(prev_order_cols)])]
return [Hint('No more orderable columns here.')]
@completer_for('relation', 'token')
def relation_token_word_completer(ctxt, cass):
return ['TOKEN(']
@completer_for('relation', 'rel_tokname')
def relation_token_subject_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
return [key.name for key in layout.partition_key]
@completer_for('relation', 'rel_lhs')
def select_relation_lhs_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
    filterable = set((layout.partition_key[0].name,))
    if layout.clustering_key:
        filterable.add(layout.clustering_key[0].name)
already_filtered_on = map(dequote_name, ctxt.get_binding('rel_lhs', ()))
for num in range(1, len(layout.partition_key)):
if layout.partition_key[num - 1].name in already_filtered_on:
filterable.add(layout.partition_key[num].name)
else:
break
for num in range(1, len(layout.clustering_key)):
if layout.clustering_key[num - 1].name in already_filtered_on:
filterable.add(layout.clustering_key[num].name)
else:
break
for cd in layout.columns.values():
if cd.index:
filterable.add(cd.name)
return map(maybe_escape_name, filterable)
@completer_for('selectClause', 'star')
def select_count_star_completer(ctxt, cass):
return ['*']
explain_completion('selector', 'colname')
syntax_rules += r'''
<insertStatement> ::= "INSERT" "INTO" cf=<columnFamilyName>
"(" [colname]=<cident> "," [colname]=<cident>
( "," [colname]=<cident> )* ")"
"VALUES" "(" [newval]=<term> valcomma="," [newval]=<term>
( valcomma="," [newval]=<term> )* valcomma=")"
( "IF" "NOT" "EXISTS")?
( "USING" [insertopt]=<usingOption>
( "AND" [insertopt]=<usingOption> )* )?
;
<usingOption> ::= "TIMESTAMP" <wholenumber>
| "TTL" <wholenumber>
;
'''
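# Illustrative statement matched by the <insertStatement> rule above
# (names are made up):
#   INSERT INTO ks.users (userid, name) VALUES (42, 'bob')
#       IF NOT EXISTS USING TTL 86400 AND TIMESTAMP 1234567890;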
def regular_column_names(table_meta):
if not table_meta or not table_meta.columns:
return []
    regular_columns = list(set(table_meta.columns.keys())
                           - set([key.name for key in table_meta.partition_key])
                           - set([key.name for key in table_meta.clustering_key]))
    return regular_columns
@completer_for('insertStatement', 'colname')
def insert_colname_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
colnames = set(map(dequote_name, ctxt.get_binding('colname', ())))
keycols = layout.primary_key
for k in keycols:
if k.name not in colnames:
return [maybe_escape_name(k.name)]
normalcols = set(regular_column_names(layout)) - colnames
return map(maybe_escape_name, normalcols)
@completer_for('insertStatement', 'newval')
def insert_newval_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
insertcols = map(dequote_name, ctxt.get_binding('colname'))
valuesdone = ctxt.get_binding('newval', ())
if len(valuesdone) >= len(insertcols):
return []
curcol = insertcols[len(valuesdone)]
cqltype = layout.columns[curcol].data_type
coltype = cqltype.typename
if coltype in ('map', 'set'):
return ['{']
if coltype == 'list':
return ['[']
if coltype == 'boolean':
return ['true', 'false']
return [Hint('<value for %s (%s)>' % (maybe_escape_name(curcol),
cqltype.cql_parameterized_type()))]
@completer_for('insertStatement', 'valcomma')
def insert_valcomma_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
numcols = len(ctxt.get_binding('colname', ()))
numvals = len(ctxt.get_binding('newval', ()))
if numcols > numvals:
return [',']
return [')']
@completer_for('insertStatement', 'insertopt')
def insert_option_completer(ctxt, cass):
opts = set('TIMESTAMP TTL'.split())
for opt in ctxt.get_binding('insertopt', ()):
opts.discard(opt.split()[0])
return opts
syntax_rules += r'''
<updateStatement> ::= "UPDATE" cf=<columnFamilyName>
( "USING" [updateopt]=<usingOption>
( "AND" [updateopt]=<usingOption> )* )?
"SET" <assignment> ( "," <assignment> )*
"WHERE" <whereClause>
( "IF" <conditions> )?
;
<assignment> ::= updatecol=<cident>
( "=" update_rhs=( <term> | <cident> )
( counterop=( "+" | "-" ) inc=<wholenumber>
| listadder="+" listcol=<cident> )?
| indexbracket="[" <term> "]" "=" <term> )
;
<conditions> ::= <condition> ( "AND" <condition> )*
;
<condition> ::= <cident> ( "[" <term> "]" )? ( ( "=" | "<" | ">" | "<=" | ">=" | "!=" ) <term>
| "IN" "(" <term> ( "," <term> )* ")")
;
'''
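# Illustrative statements matched by the <updateStatement> rules above
# (names are made up):
#   UPDATE counts SET hits = hits + 1 WHERE page = '/index';
#   UPDATE users USING TTL 600 SET props['theme'] = 'dark'
#       WHERE userid = 42 IF version = 3;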
@completer_for('updateStatement', 'updateopt')
def update_option_completer(ctxt, cass):
opts = set('TIMESTAMP TTL'.split())
for opt in ctxt.get_binding('updateopt', ()):
opts.discard(opt.split()[0])
return opts
@completer_for('assignment', 'updatecol')
def update_col_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
return map(maybe_escape_name, regular_column_names(layout))
@completer_for('assignment', 'update_rhs')
def update_countername_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
curcol = dequote_name(ctxt.get_binding('updatecol', ''))
cqltype = layout.columns[curcol].data_type
coltype = cqltype.typename
if coltype == 'counter':
return [maybe_escape_name(curcol)]
if coltype in ('map', 'set'):
return ["{"]
if coltype == 'list':
return ["["]
return [Hint('<term (%s)>' % cqltype.cql_parameterized_type())]
@completer_for('assignment', 'counterop')
def update_counterop_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
curcol = dequote_name(ctxt.get_binding('updatecol', ''))
return ['+', '-'] if layout.columns[curcol].data_type.typename == 'counter' else []
@completer_for('assignment', 'inc')
def update_counter_inc_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
curcol = dequote_name(ctxt.get_binding('updatecol', ''))
if layout.columns[curcol].data_type.typename == 'counter':
return [Hint('<wholenumber>')]
return []
@completer_for('assignment', 'listadder')
def update_listadder_completer(ctxt, cass):
rhs = ctxt.get_binding('update_rhs')
if rhs.startswith('['):
return ['+']
return []
@completer_for('assignment', 'listcol')
def update_listcol_completer(ctxt, cass):
rhs = ctxt.get_binding('update_rhs')
if rhs.startswith('['):
colname = dequote_name(ctxt.get_binding('updatecol'))
return [maybe_escape_name(colname)]
return []
@completer_for('assignment', 'indexbracket')
def update_indexbracket_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
curcol = dequote_name(ctxt.get_binding('updatecol', ''))
coltype = layout.columns[curcol].data_type.typename
if coltype in ('map', 'list'):
return ['[']
return []
syntax_rules += r'''
<deleteStatement> ::= "DELETE" ( <deleteSelector> ( "," <deleteSelector> )* )?
"FROM" cf=<columnFamilyName>
( "USING" [delopt]=<deleteOption> )?
"WHERE" <whereClause>
( "IF" ( "EXISTS" | <conditions> ) )?
;
<deleteSelector> ::= delcol=<cident> ( memberbracket="[" memberselector=<term> "]" )?
;
<deleteOption> ::= "TIMESTAMP" <wholenumber>
;
'''
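# Illustrative statement matched by the <deleteStatement> rules above:
#   DELETE props['theme'] FROM users USING TIMESTAMP 1234567890
#       WHERE userid = 42 IF EXISTS;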
@completer_for('deleteStatement', 'delopt')
def delete_opt_completer(ctxt, cass):
opts = set('TIMESTAMP'.split())
for opt in ctxt.get_binding('delopt', ()):
opts.discard(opt.split()[0])
return opts
@completer_for('deleteSelector', 'delcol')
def delete_delcol_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
return map(maybe_escape_name, regular_column_names(layout))
syntax_rules += r'''
<batchStatement> ::= "BEGIN" ( "UNLOGGED" | "COUNTER" )? "BATCH"
( "USING" [batchopt]=<usingOption>
( "AND" [batchopt]=<usingOption> )* )?
[batchstmt]=<batchStatementMember> ";"?
( [batchstmt]=<batchStatementMember> ";"? )*
"APPLY" "BATCH"
;
<batchStatementMember> ::= <insertStatement>
| <updateStatement>
| <deleteStatement>
;
'''
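# Illustrative statement matched by the <batchStatement> rules above:
#   BEGIN UNLOGGED BATCH USING TIMESTAMP 1234567890
#     INSERT INTO users (userid, name) VALUES (43, 'eve');
#     DELETE FROM users WHERE userid = 42;
#   APPLY BATCH;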
@completer_for('batchStatement', 'batchopt')
def batch_opt_completer(ctxt, cass):
opts = set('TIMESTAMP'.split())
for opt in ctxt.get_binding('batchopt', ()):
opts.discard(opt.split()[0])
return opts
syntax_rules += r'''
<truncateStatement> ::= "TRUNCATE" cf=<columnFamilyName>
;
'''
syntax_rules += r'''
<createKeyspaceStatement> ::= "CREATE" wat=( "KEYSPACE" | "SCHEMA" ) ("IF" "NOT" "EXISTS")? ksname=<cfOrKsName>
"WITH" <property> ( "AND" <property> )*
;
'''
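# Illustrative statement matched by <createKeyspaceStatement>:
#   CREATE KEYSPACE IF NOT EXISTS ks WITH replication =
#       {'class': 'SimpleStrategy', 'replication_factor': 1};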
@completer_for('createKeyspaceStatement', 'wat')
def create_ks_wat_completer(ctxt, cass):
# would prefer to get rid of the "schema" nomenclature in cql3
if ctxt.get_binding('partial', '') == '':
return ['KEYSPACE']
return ['KEYSPACE', 'SCHEMA']
syntax_rules += r'''
<createColumnFamilyStatement> ::= "CREATE" wat=( "COLUMNFAMILY" | "TABLE" ) ("IF" "NOT" "EXISTS")?
( ks=<nonSystemKeyspaceName> dot="." )? cf=<cfOrKsName>
"(" ( <singleKeyCfSpec> | <compositeKeyCfSpec> ) ")"
( "WITH" <cfamProperty> ( "AND" <cfamProperty> )* )?
;
<cfamProperty> ::= <property>
| "COMPACT" "STORAGE"
| "CLUSTERING" "ORDER" "BY" "(" <cfamOrdering>
( "," <cfamOrdering> )* ")"
;
<cfamOrdering> ::= [ordercol]=<cident> ( "ASC" | "DESC" )
;
<singleKeyCfSpec> ::= [newcolname]=<cident> <storageType> "PRIMARY" "KEY"
( "," [newcolname]=<cident> <storageType> )*
;
<compositeKeyCfSpec> ::= [newcolname]=<cident> <storageType>
"," [newcolname]=<cident> <storageType> ( "static" )?
( "," [newcolname]=<cident> <storageType> ( "static" )? )*
"," "PRIMARY" k="KEY" p="(" ( partkey=<pkDef> | [pkey]=<cident> )
( c="," [pkey]=<cident> )* ")"
;
<pkDef> ::= "(" [ptkey]=<cident> "," [ptkey]=<cident>
( "," [ptkey]=<cident> )* ")"
;
'''
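# Illustrative statement matched by the table-creation rules above
# (names are made up):
#   CREATE TABLE ks.events (
#       day text, ts timeuuid, payload blob,
#       PRIMARY KEY (day, ts)
#   ) WITH CLUSTERING ORDER BY (ts DESC) AND COMPACT STORAGE;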
@completer_for('cfamOrdering', 'ordercol')
def create_cf_clustering_order_colname_completer(ctxt, cass):
colnames = map(dequote_name, ctxt.get_binding('newcolname', ()))
# Definitely some of these aren't valid for ordering, but I'm not sure
# precisely which are. This is good enough for now
return colnames
@completer_for('createColumnFamilyStatement', 'wat')
def create_cf_wat_completer(ctxt, cass):
# would prefer to get rid of the "columnfamily" nomenclature in cql3
if ctxt.get_binding('partial', '') == '':
return ['TABLE']
return ['TABLE', 'COLUMNFAMILY']
explain_completion('createColumnFamilyStatement', 'cf', '<new_table_name>')
explain_completion('compositeKeyCfSpec', 'newcolname', '<new_column_name>')
@completer_for('createColumnFamilyStatement', 'dot')
def create_cf_ks_dot_completer(ctxt, cass):
ks = dequote_name(ctxt.get_binding('ks'))
if ks in cass.get_keyspace_names():
return ['.']
return []
@completer_for('pkDef', 'ptkey')
def create_cf_pkdef_declaration_completer(ctxt, cass):
cols_declared = ctxt.get_binding('newcolname')
pieces_already = ctxt.get_binding('ptkey', ())
pieces_already = map(dequote_name, pieces_already)
while cols_declared[0] in pieces_already:
cols_declared = cols_declared[1:]
if len(cols_declared) < 2:
return ()
return [maybe_escape_name(cols_declared[0])]
@completer_for('compositeKeyCfSpec', 'pkey')
def create_cf_composite_key_declaration_completer(ctxt, cass):
cols_declared = ctxt.get_binding('newcolname')
pieces_already = ctxt.get_binding('ptkey', ()) + ctxt.get_binding('pkey', ())
pieces_already = map(dequote_name, pieces_already)
while cols_declared[0] in pieces_already:
cols_declared = cols_declared[1:]
if len(cols_declared) < 2:
return ()
return [maybe_escape_name(cols_declared[0])]
@completer_for('compositeKeyCfSpec', 'k')
def create_cf_composite_primary_key_keyword_completer(ctxt, cass):
return ['KEY (']
@completer_for('compositeKeyCfSpec', 'p')
def create_cf_composite_primary_key_paren_completer(ctxt, cass):
return ['(']
@completer_for('compositeKeyCfSpec', 'c')
def create_cf_composite_primary_key_comma_completer(ctxt, cass):
cols_declared = ctxt.get_binding('newcolname')
pieces_already = ctxt.get_binding('pkey', ())
if len(pieces_already) >= len(cols_declared) - 1:
return ()
return [',']
syntax_rules += r'''
<createIndexStatement> ::= "CREATE" "CUSTOM"? "INDEX" ("IF" "NOT" "EXISTS")? indexname=<identifier>? "ON"
cf=<columnFamilyName> "(" (
col=<cident> |
"keys(" col=<cident> ")" |
"fullCollection(" col=<cident> ")"
) ")"
( "USING" <stringLiteral> ( "WITH" "OPTIONS" "=" <mapLiteral> )? )?
;
<createUserTypeStatement> ::= "CREATE" "TYPE" ( ks=<nonSystemKeyspaceName> dot="." )? typename=<cfOrKsName> "(" newcol=<cident> <storageType>
( "," [newcolname]=<cident> <storageType> )*
")"
;
'''
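# Illustrative statements matched by the rules above (names are made up):
#   CREATE INDEX users_name_idx ON users (name);
#   CREATE CUSTOM INDEX ON users (keys(props)) USING 'com.example.IndexClass';
#   CREATE TYPE ks.address (street text, zip int);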
explain_completion('createIndexStatement', 'indexname', '<new_index_name>')
explain_completion('createUserTypeStatement', 'typename', '<new_type_name>')
explain_completion('createUserTypeStatement', 'newcol', '<new_field_name>')
@completer_for('createIndexStatement', 'col')
def create_index_col_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
colnames = [cd.name for cd in layout.columns.values() if not cd.index]
return map(maybe_escape_name, colnames)
syntax_rules += r'''
<dropKeyspaceStatement> ::= "DROP" "KEYSPACE" ("IF" "EXISTS")? ksname=<nonSystemKeyspaceName>
;
<dropColumnFamilyStatement> ::= "DROP" ( "COLUMNFAMILY" | "TABLE" ) ("IF" "EXISTS")? cf=<columnFamilyName>
;
<indexName> ::= ( ksname=<idxOrKsName> dot="." )? idxname=<idxOrKsName> ;
<idxOrKsName> ::= <identifier>
| <quotedName>
| <unreservedKeyword>;
<dropIndexStatement> ::= "DROP" "INDEX" ("IF" "EXISTS")? idx=<indexName>
;
<dropUserTypeStatement> ::= "DROP" "TYPE" ut=<userTypeName>
;
'''
@completer_for('indexName', 'ksname')
def idx_ks_name_completer(ctxt, cass):
return [maybe_escape_name(ks) + '.' for ks in cass.get_keyspace_names()]
@completer_for('indexName', 'dot')
def idx_ks_dot_completer(ctxt, cass):
name = dequote_name(ctxt.get_binding('ksname'))
if name in cass.get_keyspace_names():
return ['.']
return []
@completer_for('indexName', 'idxname')
def idx_ks_idx_name_completer(ctxt, cass):
ks = ctxt.get_binding('ksname', None)
if ks is not None:
ks = dequote_name(ks)
try:
idxnames = cass.get_index_names(ks)
except Exception:
if ks is None:
return ()
raise
return map(maybe_escape_name, idxnames)
syntax_rules += r'''
<alterTableStatement> ::= "ALTER" wat=( "COLUMNFAMILY" | "TABLE" ) cf=<columnFamilyName>
<alterInstructions>
;
<alterInstructions> ::= "ALTER" existcol=<cident> "TYPE" <storageType>
| "ADD" newcol=<cident> <storageType> ("static")?
| "DROP" existcol=<cident>
| "WITH" <cfamProperty> ( "AND" <cfamProperty> )*
| "RENAME" existcol=<cident> "TO" newcol=<cident>
( "AND" existcol=<cident> "TO" newcol=<cident> )*
;
<alterUserTypeStatement> ::= "ALTER" "TYPE" ut=<userTypeName>
<alterTypeInstructions>
;
<alterTypeInstructions> ::= "ALTER" existcol=<cident> "TYPE" <storageType>
| "ADD" newcol=<cident> <storageType>
| "RENAME" existcol=<cident> "TO" newcol=<cident>
( "AND" existcol=<cident> "TO" newcol=<cident> )*
;
'''
@completer_for('alterInstructions', 'existcol')
def alter_table_col_completer(ctxt, cass):
layout = get_table_meta(ctxt, cass)
cols = [str(md) for md in layout.columns]
return map(maybe_escape_name, cols)
@completer_for('alterTypeInstructions', 'existcol')
def alter_type_field_completer(ctxt, cass):
layout = get_ut_layout(ctxt, cass)
    fields = [field[0] for field in layout]
return map(maybe_escape_name, fields)
explain_completion('alterInstructions', 'newcol', '<new_column_name>')
explain_completion('alterTypeInstructions', 'newcol', '<new_field_name>')
syntax_rules += r'''
<alterKeyspaceStatement> ::= "ALTER" wat=( "KEYSPACE" | "SCHEMA" ) ks=<alterableKeyspaceName>
"WITH" <property> ( "AND" <property> )*
;
'''
syntax_rules += r'''
<username> ::= name=( <identifier> | <stringLiteral> )
;
<createUserStatement> ::= "CREATE" "USER" ( "IF" "NOT" "EXISTS" )? <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<alterUserStatement> ::= "ALTER" "USER" <username>
( "WITH" "PASSWORD" <stringLiteral> )?
( "SUPERUSER" | "NOSUPERUSER" )?
;
<dropUserStatement> ::= "DROP" "USER" ( "IF" "EXISTS" )? <username>
;
<listUsersStatement> ::= "LIST" "USERS"
;
'''
syntax_rules += r'''
<grantStatement> ::= "GRANT" <permissionExpr> "ON" <resource> "TO" <username>
;
<revokeStatement> ::= "REVOKE" <permissionExpr> "ON" <resource> "FROM" <username>
;
<listPermissionsStatement> ::= "LIST" <permissionExpr>
( "ON" <resource> )? ( "OF" <username> )? "NORECURSIVE"?
;
<permission> ::= "AUTHORIZE"
| "CREATE"
| "ALTER"
| "DROP"
| "SELECT"
| "MODIFY"
;
<permissionExpr> ::= ( <permission> "PERMISSION"? )
| ( "ALL" "PERMISSIONS"? )
;
<resource> ::= <dataResource>
;
<dataResource> ::= ( "ALL" "KEYSPACES" )
| ( "KEYSPACE" <keyspaceName> )
| ( "TABLE"? <columnFamilyName> )
;
'''
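# Illustrative statements matched by the rules above:
#   GRANT SELECT PERMISSION ON KEYSPACE ks TO bob;
#   LIST ALL PERMISSIONS OF bob NORECURSIVE;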
@completer_for('username', 'name')
def username_name_completer(ctxt, cass):
def maybe_quote(name):
if CqlRuleSet.is_valid_cql3_name(name):
return name
return "'%s'" % name
# disable completion for CREATE USER.
if ctxt.matched[0][0] == 'K_CREATE':
return [Hint('<username>')]
session = cass.session
return [maybe_quote(row.values()[0].replace("'", "''")) for row in session.execute("LIST USERS")]
syntax_rules += r'''
<createTriggerStatement> ::= "CREATE" "TRIGGER" ( "IF" "NOT" "EXISTS" )? <cident>
"ON" cf=<columnFamilyName> "USING" class=<stringLiteral>
;
<dropTriggerStatement> ::= "DROP" "TRIGGER" ( "IF" "EXISTS" )? triggername=<cident>
"ON" cf=<columnFamilyName>
;
'''
explain_completion('createTriggerStatement', 'class', '\'fully qualified class name\'')
def get_trigger_names(ctxt, cass):
ks = ctxt.get_binding('ksname', None)
if ks is not None:
ks = dequote_name(ks)
return cass.get_trigger_names(ks)
@completer_for('dropTriggerStatement', 'triggername')
def drop_trigger_name_completer(ctxt, cass):
names = get_trigger_names(ctxt, cass)
return map(maybe_escape_name, names)
# END SYNTAX/COMPLETION RULE DEFINITIONS
CqlRuleSet.append_rules(syntax_rules)
| apache-2.0 | 6,892,606,226,889,685,000 | 36.141667 | 141 | 0.553511 | false |
klmitch/glance | glance/db/sqlalchemy/migrate_repo/versions/042_add_changes_to_reinstall_unique_metadef_constraints.py | 6 | 24462 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import migrate
import sqlalchemy
from sqlalchemy import (func, Index, inspect, orm, String, Table, type_coerce)
# The _upgrade...get_duplicates() helpers are kept as separate functions to
# accommodate sqlite, which locks the database against updates for as long
# as db_recs is active.
# In addition, sqlite doesn't support the 'concat' function between Strings
# and Integers, so the record updates are adjusted for it as well.
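# For example (illustrative), on most backends the duplicate rename below is
# built as func.concat(row['namespace'], '-DUPL-', metadef_namespaces.c.id),
# while on sqlite the same value is assembled with Python string concatenation
# plus type_coerce(metadef_namespaces.c.id, String).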
def _upgrade_metadef_namespaces_get_duplicates(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
metadef_namespaces = Table('metadef_namespaces', meta, autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
db_recs = (session.query(func.min(metadef_namespaces.c.id),
metadef_namespaces.c.namespace)
.group_by(metadef_namespaces.c.namespace)
.having(func.count(metadef_namespaces.c.namespace) > 1))
dbrecs = []
for row in db_recs:
dbrecs.append({'id': row[0], 'namespace': row[1]})
session.close()
return dbrecs
def _upgrade_metadef_objects_get_duplicates(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
metadef_objects = Table('metadef_objects', meta, autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
db_recs = (session.query(func.min(metadef_objects.c.id),
metadef_objects.c.namespace_id,
metadef_objects.c.name)
.group_by(metadef_objects.c.namespace_id,
metadef_objects.c.name)
.having(func.count() > 1))
dbrecs = []
for row in db_recs:
dbrecs.append({'id': row[0], 'namespace_id': row[1], 'name': row[2]})
session.close()
return dbrecs
def _upgrade_metadef_properties_get_duplicates(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
metadef_properties = Table('metadef_properties', meta, autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
db_recs = (session.query(func.min(metadef_properties.c.id),
metadef_properties.c.namespace_id,
metadef_properties.c.name)
.group_by(metadef_properties.c.namespace_id,
metadef_properties.c.name)
.having(func.count() > 1))
dbrecs = []
for row in db_recs:
dbrecs.append({'id': row[0], 'namespace_id': row[1], 'name': row[2]})
session.close()
return dbrecs
def _upgrade_metadef_tags_get_duplicates(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
metadef_tags = Table('metadef_tags', meta, autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
db_recs = (session.query(func.min(metadef_tags.c.id),
metadef_tags.c.namespace_id,
metadef_tags.c.name)
.group_by(metadef_tags.c.namespace_id,
metadef_tags.c.name)
.having(func.count() > 1))
dbrecs = []
for row in db_recs:
dbrecs.append({'id': row[0], 'namespace_id': row[1], 'name': row[2]})
session.close()
return dbrecs
def _upgrade_metadef_resource_types_get_duplicates(migrate_engine):
meta = sqlalchemy.schema.MetaData(migrate_engine)
metadef_resource_types = Table('metadef_resource_types', meta,
autoload=True)
session = orm.sessionmaker(bind=migrate_engine)()
db_recs = (session.query(func.min(metadef_resource_types.c.id),
metadef_resource_types.c.name)
.group_by(metadef_resource_types.c.name)
.having(func.count(metadef_resource_types.c.name) > 1))
dbrecs = []
for row in db_recs:
dbrecs.append({'id': row[0], 'name': row[1]})
session.close()
return dbrecs
def _upgrade_data(migrate_engine):
# Rename duplicates to be unique.
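    # For example (illustrative): two namespaces both named 'OS::Compute' with
    # ids 3 and 7 end up as 'OS::Compute' (id 3, kept) and 'OS::Compute-DUPL-7'
    # (id 7, renamed), since only rows with an id greater than the minimum id
    # for that duplicate group are updated.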
meta = sqlalchemy.schema.MetaData(migrate_engine)
# ORM tables
metadef_namespaces = Table('metadef_namespaces', meta, autoload=True)
metadef_objects = Table('metadef_objects', meta, autoload=True)
metadef_properties = Table('metadef_properties', meta, autoload=True)
metadef_tags = Table('metadef_tags', meta, autoload=True)
metadef_resource_types = Table('metadef_resource_types', meta,
autoload=True)
# Fix duplicate metadef_namespaces
# Update the non-first record(s) with an unique namespace value
dbrecs = _upgrade_metadef_namespaces_get_duplicates(migrate_engine)
for row in dbrecs:
s = (metadef_namespaces.update()
.where(metadef_namespaces.c.id > row['id'])
.where(metadef_namespaces.c.namespace == row['namespace'])
)
if migrate_engine.name == 'sqlite':
s = (s.values(namespace=(row['namespace'] + '-DUPL-' +
type_coerce(metadef_namespaces.c.id,
String)),
display_name=(row['namespace'] + '-DUPL-' +
type_coerce(metadef_namespaces.c.id,
String))))
else:
s = s.values(namespace=func.concat(row['namespace'],
'-DUPL-',
metadef_namespaces.c.id),
display_name=func.concat(row['namespace'],
'-DUPL-',
metadef_namespaces.c.id))
s.execute()
# Fix duplicate metadef_objects
dbrecs = _upgrade_metadef_objects_get_duplicates(migrate_engine)
for row in dbrecs:
s = (metadef_objects.update()
.where(metadef_objects.c.id > row['id'])
.where(metadef_objects.c.namespace_id == row['namespace_id'])
.where(metadef_objects.c.name == str(row['name']))
)
if migrate_engine.name == 'sqlite':
s = (s.values(name=(row['name'] + '-DUPL-'
+ type_coerce(metadef_objects.c.id, String))))
else:
s = s.values(name=func.concat(row['name'], '-DUPL-',
metadef_objects.c.id))
s.execute()
# Fix duplicate metadef_properties
dbrecs = _upgrade_metadef_properties_get_duplicates(migrate_engine)
for row in dbrecs:
s = (metadef_properties.update()
.where(metadef_properties.c.id > row['id'])
.where(metadef_properties.c.namespace_id == row['namespace_id'])
.where(metadef_properties.c.name == str(row['name']))
)
if migrate_engine.name == 'sqlite':
s = (s.values(name=(row['name'] + '-DUPL-' +
type_coerce(metadef_properties.c.id, String)))
)
else:
s = s.values(name=func.concat(row['name'], '-DUPL-',
metadef_properties.c.id))
s.execute()
# Fix duplicate metadef_tags
dbrecs = _upgrade_metadef_tags_get_duplicates(migrate_engine)
for row in dbrecs:
s = (metadef_tags.update()
.where(metadef_tags.c.id > row['id'])
.where(metadef_tags.c.namespace_id == row['namespace_id'])
.where(metadef_tags.c.name == str(row['name']))
)
if migrate_engine.name == 'sqlite':
s = (s.values(name=(row['name'] + '-DUPL-' +
type_coerce(metadef_tags.c.id, String)))
)
else:
s = s.values(name=func.concat(row['name'], '-DUPL-',
metadef_tags.c.id))
s.execute()
# Fix duplicate metadef_resource_types
dbrecs = _upgrade_metadef_resource_types_get_duplicates(migrate_engine)
for row in dbrecs:
s = (metadef_resource_types.update()
.where(metadef_resource_types.c.id > row['id'])
.where(metadef_resource_types.c.name == str(row['name']))
)
if migrate_engine.name == 'sqlite':
s = (s.values(name=(row['name'] + '-DUPL-' +
type_coerce(metadef_resource_types.c.id,
String)))
)
else:
s = s.values(name=func.concat(row['name'], '-DUPL-',
metadef_resource_types.c.id))
s.execute()
def _update_sqlite_namespace_id_name_constraint(metadef, metadef_namespaces,
new_constraint_name,
new_fk_name):
migrate.UniqueConstraint(
metadef.c.namespace_id, metadef.c.name).drop()
migrate.UniqueConstraint(
metadef.c.namespace_id, metadef.c.name,
name=new_constraint_name).create()
migrate.ForeignKeyConstraint(
[metadef.c.namespace_id],
[metadef_namespaces.c.id],
name=new_fk_name).create()
def _downgrade_sqlite_namespace_id_name_constraint(metadef,
metadef_namespaces,
constraint_name,
fk_name):
migrate.UniqueConstraint(
metadef.c.namespace_id,
metadef.c.name,
name=constraint_name).drop()
migrate.UniqueConstraint(
metadef.c.namespace_id,
metadef.c.name).create()
migrate.ForeignKeyConstraint(
[metadef.c.namespace_id],
[metadef_namespaces.c.id],
name=fk_name).drop()
migrate.ForeignKeyConstraint(
[metadef.c.namespace_id],
[metadef_namespaces.c.id]).create()
def _drop_unique_constraint_if_exists(inspector, table_name, metadef):
name = _get_unique_constraint_name(inspector,
table_name,
['namespace_id', 'name'])
if name:
migrate.UniqueConstraint(metadef.c.namespace_id,
metadef.c.name,
name=name).drop()
def _drop_index_with_fk_constraint(metadef, metadef_namespaces,
index_name,
fk_old_name, fk_new_name):
fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id],
[metadef_namespaces.c.id],
name=fk_old_name)
fkc.drop()
if index_name:
Index(index_name, metadef.c.namespace_id).drop()
# Rename the fk for consistency across all db's
fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id],
[metadef_namespaces.c.id],
name=fk_new_name)
fkc.create()
def _downgrade_constraint_with_fk(metadef, metadef_namespaces,
constraint_name,
fk_curr_name, fk_next_name):
fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id],
[metadef_namespaces.c.id],
name=fk_curr_name)
fkc.drop()
migrate.UniqueConstraint(metadef.c.namespace_id, metadef.c.name,
name=constraint_name).drop()
fkc = migrate.ForeignKeyConstraint([metadef.c.namespace_id],
[metadef_namespaces.c.id],
name=fk_next_name)
fkc.create()
def _get_unique_constraint_name(inspector, table_name, columns):
constraints = inspector.get_unique_constraints(table_name)
for constraint in constraints:
if set(constraint['column_names']) == set(columns):
return constraint['name']
return None
def _get_fk_constraint_name(inspector, table_name, columns):
constraints = inspector.get_foreign_keys(table_name)
for constraint in constraints:
if set(constraint['constrained_columns']) == set(columns):
return constraint['name']
return None
def upgrade(migrate_engine):
_upgrade_data(migrate_engine)
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
inspector = inspect(migrate_engine)
# ORM tables
metadef_namespaces = Table('metadef_namespaces', meta, autoload=True)
metadef_objects = Table('metadef_objects', meta, autoload=True)
metadef_properties = Table('metadef_properties', meta, autoload=True)
metadef_tags = Table('metadef_tags', meta, autoload=True)
metadef_ns_res_types = Table('metadef_namespace_resource_types',
meta, autoload=True)
metadef_resource_types = Table('metadef_resource_types', meta,
autoload=True)
# Drop the bad, non-unique indices.
if migrate_engine.name == 'sqlite':
# For sqlite:
# Only after the unique constraints have been added should the indices
# be dropped. If done the other way, sqlite complains during
# constraint adding/dropping that the index does/does not exist.
# Note: The _get_unique_constraint_name, _get_fk_constraint_name
# return None for constraints that do in fact exist. Also,
# get_index_names returns names, but, the names can not be used with
# the Index(name, blah).drop() command, so, putting sqlite into
# it's own section.
# Objects
_update_sqlite_namespace_id_name_constraint(
metadef_objects, metadef_namespaces,
'uq_metadef_objects_namespace_id_name',
'metadef_objects_fk_1')
# Properties
_update_sqlite_namespace_id_name_constraint(
metadef_properties, metadef_namespaces,
'uq_metadef_properties_namespace_id_name',
'metadef_properties_fk_1')
# Tags
_update_sqlite_namespace_id_name_constraint(
metadef_tags, metadef_namespaces,
'uq_metadef_tags_namespace_id_name',
'metadef_tags_fk_1')
# Namespaces
migrate.UniqueConstraint(
metadef_namespaces.c.namespace).drop()
migrate.UniqueConstraint(
metadef_namespaces.c.namespace,
name='uq_metadef_namespaces_namespace').create()
# ResourceTypes
migrate.UniqueConstraint(
metadef_resource_types.c.name).drop()
migrate.UniqueConstraint(
metadef_resource_types.c.name,
name='uq_metadef_resource_types_name').create()
# Now drop the bad indices
Index('ix_metadef_objects_namespace_id',
metadef_objects.c.namespace_id,
metadef_objects.c.name).drop()
Index('ix_metadef_properties_namespace_id',
metadef_properties.c.namespace_id,
metadef_properties.c.name).drop()
Index('ix_metadef_tags_namespace_id',
metadef_tags.c.namespace_id,
metadef_tags.c.name).drop()
else:
# First drop the bad non-unique indices.
# To do that (for mysql), must first drop foreign key constraints
# BY NAME and then drop the bad indices.
# Finally, re-create the foreign key constraints with a consistent
# name.
        # DB2 still has unique constraints, but they are badly named.
# Drop them, they will be recreated at the final step.
name = _get_unique_constraint_name(inspector, 'metadef_namespaces',
['namespace'])
if name:
migrate.UniqueConstraint(metadef_namespaces.c.namespace,
name=name).drop()
_drop_unique_constraint_if_exists(inspector, 'metadef_objects',
metadef_objects)
_drop_unique_constraint_if_exists(inspector, 'metadef_properties',
metadef_properties)
_drop_unique_constraint_if_exists(inspector, 'metadef_tags',
metadef_tags)
name = _get_unique_constraint_name(inspector, 'metadef_resource_types',
['name'])
if name:
migrate.UniqueConstraint(metadef_resource_types.c.name,
name=name).drop()
# Objects
_drop_index_with_fk_constraint(
metadef_objects, metadef_namespaces,
'ix_metadef_objects_namespace_id',
_get_fk_constraint_name(
inspector, 'metadef_objects', ['namespace_id']),
'metadef_objects_fk_1')
# Properties
_drop_index_with_fk_constraint(
metadef_properties, metadef_namespaces,
'ix_metadef_properties_namespace_id',
_get_fk_constraint_name(
inspector, 'metadef_properties', ['namespace_id']),
'metadef_properties_fk_1')
# Tags
_drop_index_with_fk_constraint(
metadef_tags, metadef_namespaces,
'ix_metadef_tags_namespace_id',
_get_fk_constraint_name(
inspector, 'metadef_tags', ['namespace_id']),
'metadef_tags_fk_1')
# Drop Others without fk constraints.
Index('ix_metadef_namespaces_namespace',
metadef_namespaces.c.namespace).drop()
        # The next two don't exist in ibm_db_sa, but drop them everywhere else.
if migrate_engine.name != 'ibm_db_sa':
Index('ix_metadef_resource_types_name',
metadef_resource_types.c.name).drop()
# Not needed due to primary key on same columns
Index('ix_metadef_ns_res_types_res_type_id_ns_id',
metadef_ns_res_types.c.resource_type_id,
metadef_ns_res_types.c.namespace_id).drop()
# Now, add back the dropped indexes as unique constraints
if migrate_engine.name != 'sqlite':
# Namespaces
migrate.UniqueConstraint(
metadef_namespaces.c.namespace,
name='uq_metadef_namespaces_namespace').create()
# Objects
migrate.UniqueConstraint(
metadef_objects.c.namespace_id,
metadef_objects.c.name,
name='uq_metadef_objects_namespace_id_name').create()
# Properties
migrate.UniqueConstraint(
metadef_properties.c.namespace_id,
metadef_properties.c.name,
name='uq_metadef_properties_namespace_id_name').create()
# Tags
migrate.UniqueConstraint(
metadef_tags.c.namespace_id,
metadef_tags.c.name,
name='uq_metadef_tags_namespace_id_name').create()
# Resource Types
migrate.UniqueConstraint(
metadef_resource_types.c.name,
name='uq_metadef_resource_types_name').create()
def downgrade(migrate_engine):
meta = sqlalchemy.MetaData()
meta.bind = migrate_engine
# ORM tables
metadef_namespaces = Table('metadef_namespaces', meta, autoload=True)
metadef_objects = Table('metadef_objects', meta, autoload=True)
metadef_properties = Table('metadef_properties', meta, autoload=True)
metadef_tags = Table('metadef_tags', meta, autoload=True)
metadef_resource_types = Table('metadef_resource_types', meta,
autoload=True)
metadef_ns_res_types = Table('metadef_namespace_resource_types',
meta, autoload=True)
# Drop the unique constraints
if migrate_engine.name == 'sqlite':
# Objects
_downgrade_sqlite_namespace_id_name_constraint(
metadef_objects, metadef_namespaces,
'uq_metadef_objects_namespace_id_name',
'metadef_objects_fk_1')
# Properties
_downgrade_sqlite_namespace_id_name_constraint(
metadef_properties, metadef_namespaces,
'uq_metadef_properties_namespace_id_name',
'metadef_properties_fk_1')
# Tags
_downgrade_sqlite_namespace_id_name_constraint(
metadef_tags, metadef_namespaces,
'uq_metadef_tags_namespace_id_name',
'metadef_tags_fk_1')
# Namespaces
migrate.UniqueConstraint(
metadef_namespaces.c.namespace,
name='uq_metadef_namespaces_namespace').drop()
migrate.UniqueConstraint(
metadef_namespaces.c.namespace).create()
# ResourceTypes
migrate.UniqueConstraint(
metadef_resource_types.c.name,
name='uq_metadef_resource_types_name').drop()
migrate.UniqueConstraint(
metadef_resource_types.c.name).create()
else:
# For mysql, must drop foreign key constraints before dropping the
# unique constraint. So drop the fkc, then drop the constraints,
# then recreate the fkc.
# Objects
_downgrade_constraint_with_fk(
metadef_objects, metadef_namespaces,
'uq_metadef_objects_namespace_id_name',
'metadef_objects_fk_1', None)
# Properties
_downgrade_constraint_with_fk(
metadef_properties, metadef_namespaces,
'uq_metadef_properties_namespace_id_name',
'metadef_properties_fk_1', None)
# Tags
_downgrade_constraint_with_fk(
metadef_tags, metadef_namespaces,
'uq_metadef_tags_namespace_id_name',
'metadef_tags_fk_1', 'metadef_tags_namespace_id_fkey')
# Namespaces
migrate.UniqueConstraint(
metadef_namespaces.c.namespace,
name='uq_metadef_namespaces_namespace').drop()
# Resource_types
migrate.UniqueConstraint(
metadef_resource_types.c.name,
name='uq_metadef_resource_types_name').drop()
# Create dropped unique constraints as bad, non-unique indexes
Index('ix_metadef_objects_namespace_id',
metadef_objects.c.namespace_id).create()
Index('ix_metadef_properties_namespace_id',
metadef_properties.c.namespace_id).create()
# These need to be done before the metadef_tags and metadef_namespaces
# unique constraints are created to avoid 'tuple out of range' errors
# in db2.
Index('ix_metadef_tags_namespace_id',
metadef_tags.c.namespace_id,
metadef_tags.c.name).create()
Index('ix_metadef_namespaces_namespace',
metadef_namespaces.c.namespace).create()
# Create these everywhere, except for db2
if migrate_engine.name != 'ibm_db_sa':
Index('ix_metadef_resource_types_name',
metadef_resource_types.c.name).create()
Index('ix_metadef_ns_res_types_res_type_id_ns_id',
metadef_ns_res_types.c.resource_type_id,
metadef_ns_res_types.c.namespace_id).create()
else:
# Recreate the badly named unique constraints in db2
migrate.UniqueConstraint(
metadef_namespaces.c.namespace,
name='ix_namespaces_namespace').create()
migrate.UniqueConstraint(
metadef_objects.c.namespace_id,
metadef_objects.c.name,
name='ix_objects_namespace_id_name').create()
migrate.UniqueConstraint(
metadef_properties.c.namespace_id,
metadef_properties.c.name,
name='ix_metadef_properties_namespace_id_name').create()
migrate.UniqueConstraint(
metadef_tags.c.namespace_id,
metadef_tags.c.name).create()
migrate.UniqueConstraint(
metadef_resource_types.c.name,
name='ix_metadef_resource_types_name').create()
| apache-2.0 | 3,555,000,250,370,857,500 | 39.5 | 79 | 0.576036 | false |
RefugeeMatchmaking/HackZurich | GAE_Playground/libs/networkx/tests/test_convert_numpy.py | 29 | 8642 | from nose import SkipTest
from nose.tools import assert_raises, assert_true, assert_equal
import networkx as nx
from networkx.generators.classic import barbell_graph,cycle_graph,path_graph
from networkx.testing.utils import assert_graphs_equal
class TestConvertNumpy(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global np
global np_assert_equal
try:
import numpy as np
np_assert_equal=np.testing.assert_equal
except ImportError:
raise SkipTest('NumPy not available.')
def __init__(self):
self.G1 = barbell_graph(10, 3)
self.G2 = cycle_graph(10, create_using=nx.DiGraph())
self.G3 = self.create_weighted(nx.Graph())
self.G4 = self.create_weighted(nx.DiGraph())
def create_weighted(self, G):
g = cycle_graph(4)
e = g.edges()
source = [u for u,v in e]
dest = [v for u,v in e]
weight = [s+10 for s in source]
ex = zip(source, dest, weight)
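        # each edge (u, v) receives weight u + 10, e.g. edge (1, 2) -> weight 11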
G.add_weighted_edges_from(ex)
return G
def assert_equal(self, G1, G2):
assert_true( sorted(G1.nodes())==sorted(G2.nodes()) )
assert_true( sorted(G1.edges())==sorted(G2.edges()) )
def identity_conversion(self, G, A, create_using):
GG = nx.from_numpy_matrix(A, create_using=create_using)
self.assert_equal(G, GG)
GW = nx.to_networkx_graph(A, create_using=create_using)
self.assert_equal(G, GW)
GI = create_using.__class__(A)
self.assert_equal(G, GI)
def test_shape(self):
"Conversion from non-square array."
A=np.array([[1,2,3],[4,5,6]])
assert_raises(nx.NetworkXError, nx.from_numpy_matrix, A)
def test_identity_graph_matrix(self):
"Conversion from graph to matrix to graph."
A = nx.to_numpy_matrix(self.G1)
self.identity_conversion(self.G1, A, nx.Graph())
def test_identity_graph_array(self):
"Conversion from graph to array to graph."
A = nx.to_numpy_matrix(self.G1)
A = np.asarray(A)
self.identity_conversion(self.G1, A, nx.Graph())
def test_identity_digraph_matrix(self):
"""Conversion from digraph to matrix to digraph."""
A = nx.to_numpy_matrix(self.G2)
self.identity_conversion(self.G2, A, nx.DiGraph())
def test_identity_digraph_array(self):
"""Conversion from digraph to array to digraph."""
A = nx.to_numpy_matrix(self.G2)
A = np.asarray(A)
self.identity_conversion(self.G2, A, nx.DiGraph())
def test_identity_weighted_graph_matrix(self):
"""Conversion from weighted graph to matrix to weighted graph."""
A = nx.to_numpy_matrix(self.G3)
self.identity_conversion(self.G3, A, nx.Graph())
def test_identity_weighted_graph_array(self):
"""Conversion from weighted graph to array to weighted graph."""
A = nx.to_numpy_matrix(self.G3)
A = np.asarray(A)
self.identity_conversion(self.G3, A, nx.Graph())
def test_identity_weighted_digraph_matrix(self):
"""Conversion from weighted digraph to matrix to weighted digraph."""
A = nx.to_numpy_matrix(self.G4)
self.identity_conversion(self.G4, A, nx.DiGraph())
def test_identity_weighted_digraph_array(self):
"""Conversion from weighted digraph to array to weighted digraph."""
A = nx.to_numpy_matrix(self.G4)
A = np.asarray(A)
self.identity_conversion(self.G4, A, nx.DiGraph())
def test_nodelist(self):
"""Conversion from graph to matrix to graph with nodelist."""
P4 = path_graph(4)
P3 = path_graph(3)
nodelist = P3.nodes()
A = nx.to_numpy_matrix(P4, nodelist=nodelist)
GA = nx.Graph(A)
self.assert_equal(GA, P3)
# Make nodelist ambiguous by containing duplicates.
nodelist += [nodelist[0]]
assert_raises(nx.NetworkXError, nx.to_numpy_matrix, P3, nodelist=nodelist)
def test_weight_keyword(self):
WP4 = nx.Graph()
WP4.add_edges_from( (n,n+1,dict(weight=0.5,other=0.3)) for n in range(3) )
P4 = path_graph(4)
A = nx.to_numpy_matrix(P4)
np_assert_equal(A, nx.to_numpy_matrix(WP4,weight=None))
np_assert_equal(0.5*A, nx.to_numpy_matrix(WP4))
np_assert_equal(0.3*A, nx.to_numpy_matrix(WP4,weight='other'))
def test_from_numpy_matrix_type(self):
A=np.matrix([[1]])
G=nx.from_numpy_matrix(A)
assert_equal(type(G[0][0]['weight']),int)
A=np.matrix([[1]]).astype(np.float)
G=nx.from_numpy_matrix(A)
assert_equal(type(G[0][0]['weight']),float)
A=np.matrix([[1]]).astype(np.str)
G=nx.from_numpy_matrix(A)
assert_equal(type(G[0][0]['weight']),str)
A=np.matrix([[1]]).astype(np.bool)
G=nx.from_numpy_matrix(A)
assert_equal(type(G[0][0]['weight']),bool)
A=np.matrix([[1]]).astype(np.complex)
G=nx.from_numpy_matrix(A)
assert_equal(type(G[0][0]['weight']),complex)
A=np.matrix([[1]]).astype(np.object)
assert_raises(TypeError,nx.from_numpy_matrix,A)
def test_from_numpy_matrix_dtype(self):
dt=[('weight',float),('cost',int)]
A=np.matrix([[(1.0,2)]],dtype=dt)
G=nx.from_numpy_matrix(A)
assert_equal(type(G[0][0]['weight']),float)
assert_equal(type(G[0][0]['cost']),int)
assert_equal(G[0][0]['cost'],2)
assert_equal(G[0][0]['weight'],1.0)
def test_to_numpy_recarray(self):
G=nx.Graph()
G.add_edge(1,2,weight=7.0,cost=5)
A=nx.to_numpy_recarray(G,dtype=[('weight',float),('cost',int)])
assert_equal(sorted(A.dtype.names),['cost','weight'])
assert_equal(A.weight[0,1],7.0)
assert_equal(A.weight[0,0],0.0)
assert_equal(A.cost[0,1],5)
assert_equal(A.cost[0,0],0)
def test_numpy_multigraph(self):
G=nx.MultiGraph()
G.add_edge(1,2,weight=7)
G.add_edge(1,2,weight=70)
A=nx.to_numpy_matrix(G)
assert_equal(A[1,0],77)
A=nx.to_numpy_matrix(G,multigraph_weight=min)
assert_equal(A[1,0],7)
A=nx.to_numpy_matrix(G,multigraph_weight=max)
assert_equal(A[1,0],70)
def test_from_numpy_matrix_parallel_edges(self):
"""Tests that the :func:`networkx.from_numpy_matrix` function
interprets integer weights as the number of parallel edges when
creating a multigraph.
"""
A = np.matrix([[1, 1], [1, 2]])
# First, with a simple graph, each integer entry in the adjacency
# matrix is interpreted as the weight of a single edge in the graph.
expected = nx.DiGraph()
edges = [(0, 0), (0, 1), (1, 0)]
expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
expected.add_edge(1, 1, weight=2)
actual = nx.from_numpy_matrix(A, parallel_edges=True,
create_using=nx.DiGraph())
assert_graphs_equal(actual, expected)
actual = nx.from_numpy_matrix(A, parallel_edges=False,
create_using=nx.DiGraph())
assert_graphs_equal(actual, expected)
# Now each integer entry in the adjacency matrix is interpreted as the
# number of parallel edges in the graph if the appropriate keyword
# argument is specified.
edges = [(0, 0), (0, 1), (1, 0), (1, 1), (1, 1)]
expected = nx.MultiDiGraph()
expected.add_weighted_edges_from([(u, v, 1) for (u, v) in edges])
actual = nx.from_numpy_matrix(A, parallel_edges=True,
create_using=nx.MultiDiGraph())
assert_graphs_equal(actual, expected)
expected = nx.MultiDiGraph()
expected.add_edges_from(set(edges), weight=1)
# The sole self-loop (edge 0) on vertex 1 should have weight 2.
expected[1][1][0]['weight'] = 2
actual = nx.from_numpy_matrix(A, parallel_edges=False,
create_using=nx.MultiDiGraph())
assert_graphs_equal(actual, expected)
def test_symmetric(self):
"""Tests that a symmetric matrix has edges added only once to an
undirected multigraph when using :func:`networkx.from_numpy_matrix`.
"""
A = np.matrix([[0, 1], [1, 0]])
G = nx.from_numpy_matrix(A, create_using=nx.MultiGraph())
expected = nx.MultiGraph()
expected.add_edge(0, 1, weight=1)
assert_graphs_equal(G, expected)
| mit | -7,473,892,593,940,159,000 | 38.281818 | 82 | 0.595233 | false |
samrussell/ryu | tools/install_venv.py | 56 | 4160 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2010 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Installation script for Quantum's development virtualenv
"""
import os
import subprocess
import sys
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
TEST_REQUIRES = os.path.join(ROOT, 'tools', 'test-requires')
PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1])
VENV_EXISTS = bool(os.path.exists(VENV))
def die(message, *args):
print >> sys.stderr, message % args
sys.exit(1)
def run_command(cmd, redirect_output=True, check_exit_code=True):
"""
Runs a command in an out-of-process shell, returning the
output of that command. Working directory is ROOT.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
raise Exception('Command "%s" failed.\n%s' % (' '.join(cmd), output))
return output
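# Illustrative usage (hypothetical command; the return value is the command's
# stdout, captured with cwd=ROOT):
#   sha = run_command(['git', 'rev-parse', 'HEAD'])
#   print sha.strip()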
HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'],
check_exit_code=False).strip())
HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'],
check_exit_code=False).strip())
def check_dependencies():
"""Make sure virtualenv is in the path."""
if not HAS_VIRTUALENV:
raise Exception('Virtualenv not found. ' + \
'Try installing python-virtualenv')
print 'done.'
def create_virtualenv(venv=VENV, install_pip=False):
"""Creates the virtual environment and installs PIP only into the
virtual environment
"""
print 'Creating venv...',
install = ['virtualenv', '-q', venv]
run_command(install)
print 'done.'
print 'Installing pip in virtualenv...',
if install_pip and \
not run_command(['tools/with_venv.sh', 'easy_install',
'pip>1.0']):
die("Failed to install pip.")
print 'done.'
def install_dependencies(venv=VENV):
print 'Installing dependencies with pip (this can take a while)...'
run_command(['tools/with_venv.sh', 'pip', 'install', '-r',
PIP_REQUIRES], redirect_output=False)
run_command(['tools/with_venv.sh', 'pip', 'install', '-r',
TEST_REQUIRES], redirect_output=False)
# Tell the virtual env how to "import quantum"
pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages",
"quantum.pth")
f = open(pthfile, 'w')
f.write("%s\n" % ROOT)
f.close()
def print_help():
help = """
Quantum development environment setup is complete.
Quantum development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Quantum virtualenv for the extent of your current shell
session you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print help
def main(argv):
check_dependencies()
create_virtualenv()
install_dependencies()
print_help()
if __name__ == '__main__':
main(sys.argv)
| apache-2.0 | 6,134,830,222,872,332,000 | 29.588235 | 78 | 0.651923 | false |
LCAS/ros_web_apis | ms_face_api/src/ms_face_api/person_group.py | 1 | 4140 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: person_group.py
Description: Person Group section of the Cognitive Face API.
"""
from . import util
def create(person_group_id, name=None, user_data=None):
"""Create a new person group with specified `person_group_id`, `name` and
user-provided `user_data`.
Args:
person_group_id: User-provided `person_group_id` as a string. The valid
characters include numbers, English letters in lower case, '-' and
'_'. The maximum length of the personGroupId is 64.
name: Person group display name. The maximum length is 128.
user_data: User-provided data attached to the person group. The size
limit is 16KB.
Returns:
An empty response body.
"""
name = person_group_id if name is None else name
url = 'persongroups/{}'.format(person_group_id)
json = {
'name': name,
'userData': user_data,
}
return util.request('PUT', url, json=json)
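# Minimal usage sketch (hypothetical group id; assumes `util` is configured
# with a valid subscription key):
#   create('friends', name='Friends', user_data='demo group')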
def delete(person_group_id):
"""Delete an existing person group. Persisted face images of all people in
the person group will also be deleted.
Args:
person_group_id: The `person_group_id` of the person group to be
deleted.
Returns:
An empty response body.
"""
url = 'persongroups/{}'.format(person_group_id)
return util.request('DELETE', url)
def get(person_group_id):
"""Retrieve the information of a person group, including its `name` and
`user_data`. This API returns person group information only, use
`person.lists` instead to retrieve person information under the person
group.
Args:
person_group_id: `person_group_id` of the target person group.
Returns:
The person group's information.
"""
url = 'persongroups/{}'.format(person_group_id)
return util.request('GET', url)
def get_status(person_group_id):
"""Retrieve the training status of a person group (completed or ongoing).
Training can be triggered by `person_group.train`. The training will
process for a while on the server side.
Args:
person_group_id: `person_group_id` of the target person group.
Returns:
The person group's training status.
"""
url = 'persongroups/{}/training'.format(person_group_id)
return util.request('GET', url)
def lists(start=None, top=None):
"""List person groups and their information.
Args:
start: Optional parameter. List person groups from the least
`person_group_id` greater than the "start". It contains no more
than 64 characters. Default is empty.
top: The number of person groups to list, ranging in [1, 1000]. Default
is 1000.
Returns:
An array of person groups and their information (`person_group_id`,
`name` and `user_data`).
"""
url = 'persongroups'
params = {
'start': start,
'top': top,
}
return util.request('GET', url, params=params)
def train(person_group_id):
"""Queue a person group training task, the training task may not be started
immediately.
Args:
person_group_id: Target person group to be trained.
Returns:
An empty JSON body.
"""
url = 'persongroups/{}/train'.format(person_group_id)
return util.request('POST', url)
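# Training is asynchronous; a minimal polling sketch (the 'status' field and
# the 'running' value are assumptions based on the Cognitive Face API docs):
#   train('friends')
#   while get_status('friends').get('status') == 'running':
#       time.sleep(1)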
def update(person_group_id, name=None, user_data=None):
"""Update an existing person group's display `name` and `user_data`. The
properties that do not appear in the request body will not be updated.
Args:
person_group_id: `person_group_id` of the person group to be updated.
name: Optional parameter. Person group display name. The maximum length
is 128.
user_data: Optional parameter. User-provided data attached to the
person group. The size limit is 16KB.
Returns:
An empty response body.
"""
url = 'persongroups/{}'.format(person_group_id)
json = {
'name': name,
'userData': user_data,
}
return util.request('PATCH', url, json=json)
| mit | 2,872,815,642,897,212,400 | 28.15493 | 79 | 0.63913 | false |
akarki15/mozillians | vendor-local/lib/python/celery/tests/test_events/__init__.py | 14 | 6064 | from __future__ import absolute_import
from __future__ import with_statement
import socket
from celery import events
from celery.app import app_or_default
from celery.tests.utils import Case
class MockProducer(object):
raise_on_publish = False
def __init__(self, *args, **kwargs):
self.sent = []
def publish(self, msg, *args, **kwargs):
if self.raise_on_publish:
raise KeyError()
self.sent.append(msg)
def close(self):
pass
def has_event(self, kind):
for event in self.sent:
if event["type"] == kind:
return event
return False
class TestEvent(Case):
def test_constructor(self):
event = events.Event("world war II")
self.assertEqual(event["type"], "world war II")
self.assertTrue(event["timestamp"])
class TestEventDispatcher(Case):
def setUp(self):
self.app = app_or_default()
def test_send(self):
producer = MockProducer()
eventer = self.app.events.Dispatcher(object(), enabled=False)
eventer.publisher = producer
eventer.enabled = True
eventer.send("World War II", ended=True)
self.assertTrue(producer.has_event("World War II"))
eventer.enabled = False
eventer.send("World War III")
self.assertFalse(producer.has_event("World War III"))
evs = ("Event 1", "Event 2", "Event 3")
eventer.enabled = True
eventer.publisher.raise_on_publish = True
eventer.buffer_while_offline = False
with self.assertRaises(KeyError):
eventer.send("Event X")
eventer.buffer_while_offline = True
for ev in evs:
eventer.send(ev)
eventer.publisher.raise_on_publish = False
eventer.flush()
for ev in evs:
self.assertTrue(producer.has_event(ev))
def test_enabled_disable(self):
connection = self.app.broker_connection()
channel = connection.channel()
try:
dispatcher = self.app.events.Dispatcher(connection,
enabled=True)
dispatcher2 = self.app.events.Dispatcher(connection,
enabled=True,
channel=channel)
self.assertTrue(dispatcher.enabled)
self.assertTrue(dispatcher.publisher.channel)
self.assertEqual(dispatcher.publisher.serializer,
self.app.conf.CELERY_EVENT_SERIALIZER)
created_channel = dispatcher.publisher.channel
dispatcher.disable()
dispatcher.disable() # Disable with no active publisher
dispatcher2.disable()
self.assertFalse(dispatcher.enabled)
self.assertIsNone(dispatcher.publisher)
self.assertTrue(created_channel.closed)
self.assertFalse(dispatcher2.channel.closed,
"does not close manually provided channel")
dispatcher.enable()
self.assertTrue(dispatcher.enabled)
self.assertTrue(dispatcher.publisher)
finally:
channel.close()
connection.close()
class TestEventReceiver(Case):
def setUp(self):
self.app = app_or_default()
def test_process(self):
message = {"type": "world-war"}
got_event = [False]
def my_handler(event):
got_event[0] = True
r = events.EventReceiver(object(),
handlers={"world-war": my_handler},
node_id="celery.tests",
)
r._receive(message, object())
self.assertTrue(got_event[0])
def test_catch_all_event(self):
message = {"type": "world-war"}
got_event = [False]
def my_handler(event):
got_event[0] = True
r = events.EventReceiver(object(), node_id="celery.tests")
events.EventReceiver.handlers["*"] = my_handler
try:
r._receive(message, object())
self.assertTrue(got_event[0])
finally:
events.EventReceiver.handlers = {}
def test_itercapture(self):
connection = self.app.broker_connection()
try:
r = self.app.events.Receiver(connection, node_id="celery.tests")
it = r.itercapture(timeout=0.0001, wakeup=False)
consumer = it.next()
self.assertTrue(consumer.queues)
self.assertEqual(consumer.callbacks[0], r._receive)
with self.assertRaises(socket.timeout):
it.next()
with self.assertRaises(socket.timeout):
r.capture(timeout=0.00001)
finally:
connection.close()
def test_itercapture_limit(self):
connection = self.app.broker_connection()
channel = connection.channel()
try:
events_received = [0]
def handler(event):
events_received[0] += 1
producer = self.app.events.Dispatcher(connection,
enabled=True,
channel=channel)
r = self.app.events.Receiver(connection,
handlers={"*": handler},
node_id="celery.tests")
evs = ["ev1", "ev2", "ev3", "ev4", "ev5"]
for ev in evs:
producer.send(ev)
it = r.itercapture(limit=4, wakeup=True)
it.next() # skip consumer (see itercapture)
list(it)
self.assertEqual(events_received[0], 4)
finally:
channel.close()
connection.close()
class test_misc(Case):
def setUp(self):
self.app = app_or_default()
def test_State(self):
state = self.app.events.State()
self.assertDictEqual(dict(state.workers), {})
| bsd-3-clause | -134,159,240,186,236,400 | 30.748691 | 76 | 0.54535 | false |
robwebset/script.sonos | service.py | 1 | 22409 | # -*- coding: utf-8 -*-
import sys
import os
import traceback
import xbmc
import xbmcaddon
import xbmcgui
import xbmcvfs
# Add JSON support for queries
if sys.version_info < (2, 7):
import simplejson
else:
import json as simplejson
# Import the common settings
from resources.lib.settings import Settings
from resources.lib.settings import log
from resources.lib.settings import SocoLogging
from resources.lib.sonos import Sonos
# Need to make sure that we override the SoCo class with the Sonos one
from resources.lib.soco import config
config.SOCO_CLASS = Sonos
from resources.lib.soco import discover
ADDON = xbmcaddon.Addon(id='script.sonos')
CWD = ADDON.getAddonInfo('path').decode("utf-8")
ICON = ADDON.getAddonInfo('icon')
RES_DIR = xbmc.translatePath(os.path.join(CWD, 'resources').encode("utf-8")).decode("utf-8")
##########################################################
# Class to display a popup of what is currently playing
##########################################################
class SonosPlayingPopup(xbmcgui.WindowXMLDialog):
ICON = 400
LABEL1 = 401
LABEL2 = 402
LABEL3 = 403
def __init__(self, *args, **kwargs):
# Copy off the key-word arguments
# The non keyword arguments will be the ones passed to the main WindowXML
self.artist = kwargs.pop('artist')
self.album = kwargs.pop('album')
self.title = kwargs.pop('title')
self.albumArt = kwargs.pop('albumArt')
# Static method to create the Window Dialog class
@staticmethod
def createSonosPlayingPopup(track):
# Creating popup for
log("SonosPlayingPopup: Currently playing artist = %s, album = %s, track = %s" % (track['artist'], track['album'], track['title']))
# Get the album art if it is set (Default to the Sonos icon)
albumArt = ICON
if track['album_art'] != "":
albumArt = track['album_art']
return SonosPlayingPopup("script-sonos-notif-popup.xml", CWD, artist=track['artist'], album=track['album'], title=track['title'], albumArt=albumArt)
def onInit(self):
# Need to populate the popup with the artist details
label1 = self.getControl(SonosPlayingPopup.LABEL1)
label1.addLabel(self.artist)
label2 = self.getControl(SonosPlayingPopup.LABEL2)
label2.addLabel(self.album)
label3 = self.getControl(SonosPlayingPopup.LABEL3)
label3.addLabel(self.title)
icon = self.getControl(SonosPlayingPopup.ICON)
icon.setImage(self.albumArt)
xbmcgui.WindowXMLDialog.onInit(self)
def showPopup(self):
self.show()
xbmc.sleep(Settings.getNotificationDisplayDuration())
self.close()
#########################################
# Links the Sonos Volume to that of Kodi
#########################################
class SonosVolumeLink():
def __init__(self, sonosDevice):
self.sonosDevice = sonosDevice
self.sonosVolume = 0
self.sonosMuted = False
self.xbmcPlayingProcessed = False
# On Startup check to see if we need to switch the Sonos speaker to line-in
if Settings.switchSonosToLineIn():
self._switchToLineIn()
def updateSonosVolume(self):
# Check to see if the Sonos Volume Link is Enabled
if not Settings.linkAudioWithSonos():
return
# Get the current Kodi Volume
xbmcVolume, xbmcMuted = self._getXbmcVolume()
log("SonosVolumeLink: xbmcVolume = %d, selfvol = %d" % (xbmcVolume, self.sonosVolume))
# Check to see if it has changed, and if we need to change the sonos value
if (xbmcVolume != -1) and (xbmcVolume != self.sonosVolume):
log("SonosVolumeLink: Setting volume to = %d" % xbmcVolume)
self.sonosDevice.setGroupVolume(xbmcVolume, True)
self.sonosVolume = xbmcVolume
# Check to see if Kodi has been muted
if (xbmcMuted != -1) and (xbmcMuted != self.sonosMuted):
self.sonosDevice.fullMute(xbmcMuted)
self.sonosMuted = xbmcMuted
# This will return the volume in a range of 0-100
def _getXbmcVolume(self):
result = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "Application.GetProperties", "params": { "properties": [ "volume", "muted" ] }, "id": 1}')
json_query = simplejson.loads(result)
volume = -1
if ("result" in json_query) and ('volume' in json_query['result']):
# Get the volume value
volume = json_query['result']['volume']
muted = None
if ("result" in json_query) and ('muted' in json_query['result']):
# Get the muted value
muted = json_query['result']['muted']
log("SonosVolumeLink: current volume: %s%%" % str(volume))
return volume, muted
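# Sample JSON-RPC exchange (illustrative values):
#   request:  {"jsonrpc": "2.0", "method": "Application.GetProperties",
#              "params": {"properties": ["volume", "muted"]}, "id": 1}
#   response: {"id": 1, "jsonrpc": "2.0",
#              "result": {"muted": false, "volume": 85}}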
def _switchToLineIn(self):
# Check if we need to ensure the Sonos system is using the line-in
try:
# Not all speakers support line-in - so catch exception
self.sonosDevice.switch_to_line_in()
# Once switch to line in, some systems require that a play command is sent
self.sonosDevice.play()
except:
log("SonosService: Failed to switch to Line-In for speaker %s" % Settings.getIPAddress())
log("SonosService: %s" % traceback.format_exc())
def switchToLineInIfXbmcPlaying(self):
# Check if we need to switch to line in every time media starts playing
if Settings.switchSonosToLineInOnMediaStart():
# Check to see if something has started playing
if xbmc.Player().isPlaying():
# Check if we have already processed that something is playing
if self.xbmcPlayingProcessed is False:
self.xbmcPlayingProcessed = True
log("SonosService: Switching to line-in because media started")
# Switch to line-in
self._switchToLineIn()
else:
# No longer playing, so need to process the next change
self.xbmcPlayingProcessed = False
#########################################
# Redirects the volume controls to Sonos
#########################################
class SonosVolumeRedirect():
def __init__(self, sonosDevice):
self.sonosDevice = sonosDevice
self.KEYMAP_PATH = xbmc.translatePath(os.path.join(RES_DIR, "keymaps"))
self.KEYMAPSOURCEFILE = os.path.join(self.KEYMAP_PATH, "sonos_volume_keymap.xml")
self.KEYMAPDESTFILE = os.path.join(xbmc.translatePath('special://userdata/keymaps'), "sonos_volume_keymap.xml")
self.volumeChangeNotification = -1
if Settings.redirectVolumeControls():
self._enableKeymap()
else:
self._cleanupKeymap()
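# The shipped keymap is expected to remap the volume keys to something like
#   SetProperty(SonosVolumeRedirect,up,10000)
# (illustrative; the exact XML lives under resources/keymaps), which is what
# checkVolumeChange() polls for via getProperty() below.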
def checkVolumeChange(self):
# Check to see if the Sonos Volume Redirect is Enabled
if not Settings.redirectVolumeControls():
return
self.volumeChangeNotification = self.volumeChangeNotification - 1
redirect = xbmcgui.Window(10000).getProperty("SonosVolumeRedirect")
while redirect not in [None, ""]:
xbmcgui.Window(10000).clearProperty("SonosVolumeRedirect")
volumeChange = 0
isMute = False
if redirect.lower() == "up":
volumeChange = Settings.getVolumeChangeIncrements()
elif redirect.lower() == "down":
volumeChange = Settings.getVolumeChangeIncrements() * -1
elif redirect.lower() == "mute":
isMute = True
log("SonosVolumeRedirect: Changing by %d" % volumeChange)
# Check to see if it has changed, and if we need to change the sonos value
if isMute:
# Check the current muted state
if self.sonosDevice.mute:
self.sonosDevice.fullMute(False)
else:
self.sonosDevice.fullMute(True)
self.volumeChangeNotification = Settings.getChecksPerSecond() * 2
elif volumeChange != 0:
self.sonosDevice.setGroupVolume(self.sonosDevice.volume + volumeChange, True)
self.volumeChangeNotification = Settings.getChecksPerSecond() * 2
redirect = xbmcgui.Window(10000).getProperty("SonosVolumeRedirect")
# Check if we have started changing the volume and have now stopped
# for a little while
if self.volumeChangeNotification == 0:
self.volumeChangeNotification = -1
if self.sonosDevice.mute:
xbmcgui.Dialog().notification(ADDON.getLocalizedString(32074), ADDON.getLocalizedString(32075), ICON, 2000, False)
else:
displayMsg = "%d" % sonosDevice.volume
xbmcgui.Dialog().notification(ADDON.getLocalizedString(32074), displayMsg, ICON, 2000, False)
def cleanup(self):
if Settings.redirectVolumeControls():
self._cleanupKeymap()
# Copies the Sonos keymap to the correct location and loads it
def _enableKeymap(self):
try:
xbmcvfs.copy(self.KEYMAPSOURCEFILE, self.KEYMAPDESTFILE)
xbmc.executebuiltin('Action(reloadkeymaps)')
log("SonosVolumeRedirect: Installed custom keymap")
except:
log("SonosVolumeRedirect: Failed to copy & load custom keymap: %s" % traceback.format_exc(), xbmc.LOGERROR)
# Removes the Sonos keymap
def _cleanupKeymap(self):
if xbmcvfs.exists(self.KEYMAPDESTFILE):
try:
xbmcvfs.delete(self.KEYMAPDESTFILE)
log("SonosVolumeRedirect: Removed custom keymap")
except:
log("SonosVolumeRedirect: Failed to remove & load custom keymap: %s" % traceback.format_exc(), xbmc.LOGERROR)
# Force a re-load
xbmc.executebuiltin('Action(reloadkeymaps)')
##############################################################
# Automatically Pauses Sonos if Kodi starts playing something
##############################################################
class SonosAutoPause():
def __init__(self, sonosDevice):
self.sonosDevice = sonosDevice
self.xbmcPlayState = False
self.autoStopped = False
self.resumeCountdown = Settings.autoResumeSonos()
# Check if the Sonos system should be paused or resumed
def check(self):
if Settings.autoPauseSonos() and not Settings.linkAudioWithSonos():
try:
# Check to see if something has started playing
if xbmc.Player().isPlaying():
# If this is a change in play state since the last time we checked
if self.xbmcPlayState is False:
log("SonosAutoPause: Automatically pausing Sonos")
self.xbmcPlayState = True
# Pause the sonos if it is playing
if self._isSonosPlaying():
self.sonosDevice.pause()
self.autoStopped = True
self.resumeCountdown = Settings.autoResumeSonos()
else:
self.xbmcPlayState = False
if Settings.autoResumeSonos() > 0 and self.autoStopped:
if self.resumeCountdown > 0:
self.resumeCountdown = self.resumeCountdown - 1
else:
log("SonosAutoPause: Automatically resuming Sonos")
self.sonosDevice.play()
self.autoStopped = False
self.resumeCountdown = Settings.autoResumeSonos()
except:
# If we fail to stop the speaker playing, it may be because
# there is a network problem or the speaker is powered down
# So we just continue after logging the error
log("SonosAutoPause: Error from speaker %s" % Settings.getIPAddress())
log("SonosAutoPause: %s" % traceback.format_exc())
# Works out if the Sonos system is playing
def _isSonosPlaying(self):
playStatus = self.sonosDevice.get_current_transport_info()
sonosPlaying = False
if (playStatus is not None) and (playStatus['current_transport_state'] == 'PLAYING'):
sonosPlaying = True
return sonosPlaying
#################################################
# Sets the IP Address based off of the Zone Name
#################################################
class AutoUpdateIPAddress():
def __init__(self):
# Check if the auto update IP is enabled
if not Settings.isAutoIpUpdateEnabled():
return
# Get the existing zone we are trying to set the IP Address for
existingZone = Settings.getZoneName()
# Nothing to do if there is no Zone name set
if (existingZone is None) or (existingZone == ""):
return
# Set up the logging before using the Sonos Device
SocoLogging.enable()
try:
sonos_devices = discover()
except:
log("AutoUpdateIPAddress: Exception when getting devices")
log("AutoUpdateIPAddress: %s" % traceback.format_exc())
sonos_devices = []
if sonos_devices is None:
log("AutoUpdateIPAddress: Failed to find any devices")
sonos_devices = []
ipaddresses = []
# Check each of the devices found
for device in sonos_devices:
ip = device.ip_address
log("AutoUpdateIPAddress: Getting info for IP address %s" % ip)
playerInfo = None
# Try and get the player info, if it fails then it is not a valid
# player and we should continue to the next
try:
playerInfo = device.get_speaker_info()
except:
log("AutoUpdateIPAddress: IP address %s is not a valid player" % ip)
log("AutoUpdateIPAddress: %s" % traceback.format_exc())
continue
# If player info was found, check which zone it belongs to
if playerInfo is not None:
# What is the name of the zone that this speaker is in?
zone_name = playerInfo['zone_name']
# Check the zone against the ones we are looking for
if zone_name == existingZone:
# There could be multiple IP addresses in the same group
# so save them all
log("AutoUpdateIPAddress: IP address %s in zone %s" % (ip, existingZone))
ipaddresses.append(ip)
# Check if there is an IP Address to set
if len(ipaddresses) > 0:
oldIp = Settings.getIPAddress()
# Check if we already have a match to the existing IP Address
matchesExisting = False
for newIp in ipaddresses:
if newIp == oldIp:
matchesExisting = True
break
# If no match found - then set to the first IP Address
if not matchesExisting:
log("AutoUpdateIPAddress: Setting IP address to %s" % ipaddresses[0])
Settings.setIPAddress(ipaddresses[0])
################################
# Main of the Sonos Service
################################
if __name__ == '__main__':
log("SonosService: Starting service (version %s)" % ADDON.getAddonInfo('version'))
# Start by doing any auto-setting of the IP Address
autoIpAdd = AutoUpdateIPAddress()
del autoIpAdd
# Check for the list of things that impact audio
audioChanges = Settings.linkAudioWithSonos() or Settings.switchSonosToLineIn() or Settings.switchSonosToLineInOnMediaStart()
# Check to see if we need to launch the Sonos Controller as soon as Kodi starts
if Settings.autoLaunchControllerOnStartup():
# Launch the Sonos controller, but do not block as we have more to do as a service
log("SonosService: Launching controller on startup")
xbmc.executebuiltin('RunScript(%s)' % (os.path.join(CWD, "default.py")), False)
if (not Settings.isNotificationEnabled()) and (not audioChanges) and (not Settings.autoPauseSonos()) and (not Settings.redirectVolumeControls()):
log("SonosService: Notifications, Volume Link and Auto Pause are disabled, exiting service")
else:
sonosDevice = Sonos.createSonosDevice()
# Make sure a Sonos speaker was found
if sonosDevice is not None:
timeUntilNextCheck = Settings.getNotificationCheckFrequency() * Settings.getChecksPerSecond()
log("SonosService: Notification Check Frequency = %d" % timeUntilNextCheck)
lastDisplayedTrack = None
# On service start, only display the popup if there is
# currently something playing
justStartedService = True
# Class to deal with sync of the volume
volumeLink = SonosVolumeLink(sonosDevice)
# Class to deal with redirecting the volume
redirectVolume = SonosVolumeRedirect(sonosDevice)
# Class that handles the automatic pausing of the Sonos system
autoPause = SonosAutoPause(sonosDevice)
# Loop until Kodi exits
while (not xbmc.abortRequested):
# First check to see if the Sonos needs to be switched
# to line-in because media has started playing
volumeLink.switchToLineInIfXbmcPlaying()
# Make sure the volume matches
volumeLink.updateSonosVolume()
# Check if a volume change has been made
redirectVolume.checkVolumeChange()
# Now check to see if the Sonos system needs pausing
autoPause.check()
if (timeUntilNextCheck < 1) and Settings.isNotificationEnabled():
if Settings.stopNotifIfVideoPlaying() and xbmc.Player().isPlayingVideo():
log("SonosService: Video Playing, Skipping Notification Display")
elif Settings.stopNotifIfControllerShowing() and (xbmcgui.Window(10000).getProperty("SonosControllerShowing") == 'true'):
log("SonosService: Sonos Controller Showing, Skipping Notification Display")
# Reset the "just started" flag to ensure that when we exit it does not
# show the notification immediately
justStartedService = True
else:
log("SonosService: Notification wait time expired")
try:
# Get the current track that is being played at the moment
track = sonosDevice.get_current_track_info()
# Record if the sonos is currently playing
isActive = True
# Check to see if a new track is playing before displaying the popup
if (track['uri'] == '') or (track['title'] == ''):
track = None
# Also make the last track value None as we don't want
# this seen as a change
lastDisplayedTrack = None
elif justStartedService is True:
# Check if the sonos is currently playing
playStatus = sonosDevice.get_current_transport_info()
if (playStatus is None) or (playStatus['current_transport_state'] != 'PLAYING'):
isActive = False
# Check to see if the playing track has changed
if (track is not None) and ((lastDisplayedTrack is None) or (track['uri'] != lastDisplayedTrack['uri'])):
# Update the last displayed track to the current one
lastDisplayedTrack = track
# Only display the dialog if it is playing
if isActive:
if Settings.useXbmcNotifDialog():
log("SonosService: Currently playing artist = %s, album = %s, track = %s" % (track['artist'], track['album'], track['title']))
# Get the album art if it is set (Default to the Sonos icon)
albumArt = ICON
if track['album_art'] != "":
albumArt = track['album_art']
# Gotham allows you to have a dialog without making a sound
xbmcgui.Dialog().notification(track['artist'], track['title'], albumArt, Settings.getNotificationDisplayDuration(), False)
else:
sonosPopup = SonosPlayingPopup.createSonosPlayingPopup(track)
sonosPopup.showPopup()
del sonosPopup
except:
# Connection failure - may just be a network glitch - so don't exit
log("SonosService: Error from speaker %s" % Settings.getIPAddress())
log("SonosService: %s" % traceback.format_exc())
# No longer the first start
justStartedService = False
# Reset the timer for the next check
timeUntilNextCheck = Settings.getNotificationCheckFrequency() * Settings.getChecksPerSecond()
# Increment the timer and sleep for a second before the next check
xbmc.sleep(1000 / Settings.getChecksPerSecond())
timeUntilNextCheck = timeUntilNextCheck - 1
redirectVolume.cleanup()
del redirectVolume
del volumeLink
del autoPause
log("Sonos: Stopping service")
| gpl-2.0 | 6,117,928,737,935,665,000 | 43.112205 | 166 | 0.573564 | false |
lgarren/spack | var/spack/repos/builtin/packages/r-bh/package.py | 3 | 2628 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RBh(RPackage):
"""Boost provides free peer-reviewed portable C++ source libraries. A large
part of Boost is provided as C++ template code which is resolved entirely
at compile-time without linking. This package aims to provide the most
useful subset of Boost libraries for template use among CRAN package. By
placing these libraries in this package, we offer a more efficient
distribution system for CRAN as replication of this code in the sources of
other packages is avoided. As of release 1.60.0-2, the following Boost
libraries are included: 'algorithm' 'any' 'bimap' 'bind' 'circular_buffer'
'concept' 'config' 'container' 'date_time' 'detail' 'dynamic_bitset'
'exception' 'filesystem' 'flyweight' 'foreach' 'functional' 'fusion'
'geometry' 'graph' 'heap' 'icl' 'integer' 'interprocess' 'intrusive' 'io'
'iostreams' 'iterator' 'math' 'move' 'mpl' 'multiprcecision' 'numeric'
'pending' 'phoenix' 'preprocessor' 'random' 'range' 'smart_ptr' 'spirit'
'tuple' 'type_traits' 'typeof' 'unordered' 'utility' 'uuid'."""
homepage = "https://cran.r-project.org/web/packages/BH/index.html"
url = "https://cran.r-project.org/src/contrib/BH_1.65.0-1.tar.gz"
list_url = homepage
version('1.65.0-1', '7d0402188e4af59f4103f36616d6ee55')
version('1.60.0-2', 'b50fdc85285da05add4e9da664a2d551')
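# Illustrative install command for this recipe:
#   spack install [email protected]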
| lgpl-2.1 | -8,498,819,118,847,006,000 | 53.75 | 79 | 0.689878 | false |
marcoantoniooliveira/labweb | oscar/lib/python2.7/site-packages/IPython/parallel/tests/test_db.py | 2 | 11884 | """Tests for db backends
Authors:
* Min RK
"""
#-------------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------
from __future__ import division
import logging
import os
import tempfile
import time
from datetime import datetime, timedelta
from unittest import TestCase
from IPython.parallel import error
from IPython.parallel.controller.dictdb import DictDB
from IPython.parallel.controller.sqlitedb import SQLiteDB
from IPython.parallel.controller.hub import init_record, empty_record
from IPython.testing import decorators as dec
from IPython.kernel.zmq.session import Session
#-------------------------------------------------------------------------------
# TestCases
#-------------------------------------------------------------------------------
def setup():
global temp_db
temp_db = tempfile.NamedTemporaryFile(suffix='.db').name
class TaskDBTest:
def setUp(self):
self.session = Session()
self.db = self.create_db()
self.load_records(16)
def create_db(self):
raise NotImplementedError
def load_records(self, n=1, buffer_size=100):
"""load n records for testing"""
#sleep 1/10 s, to ensure timestamp is different to previous calls
time.sleep(0.1)
msg_ids = []
for i in range(n):
msg = self.session.msg('apply_request', content=dict(a=5))
msg['buffers'] = [os.urandom(buffer_size)]
rec = init_record(msg)
msg_id = msg['header']['msg_id']
msg_ids.append(msg_id)
self.db.add_record(msg_id, rec)
return msg_ids
def test_add_record(self):
before = self.db.get_history()
self.load_records(5)
after = self.db.get_history()
self.assertEqual(len(after), len(before)+5)
self.assertEqual(after[:-5],before)
def test_drop_record(self):
msg_id = self.load_records()[-1]
rec = self.db.get_record(msg_id)
self.db.drop_record(msg_id)
self.assertRaises(KeyError,self.db.get_record, msg_id)
def _round_to_millisecond(self, dt):
"""necessary because mongodb rounds microseconds"""
micro = dt.microsecond
extra = int(str(micro)[-3:])
return dt - timedelta(microseconds=extra)
def test_update_record(self):
now = self._round_to_millisecond(datetime.now())
#
msg_id = self.db.get_history()[-1]
rec1 = self.db.get_record(msg_id)
data = {'stdout': 'hello there', 'completed' : now}
self.db.update_record(msg_id, data)
rec2 = self.db.get_record(msg_id)
self.assertEqual(rec2['stdout'], 'hello there')
self.assertEqual(rec2['completed'], now)
rec1.update(data)
self.assertEqual(rec1, rec2)
# def test_update_record_bad(self):
# """test updating nonexistant records"""
# msg_id = str(uuid.uuid4())
# data = {'stdout': 'hello there'}
# self.assertRaises(KeyError, self.db.update_record, msg_id, data)
def test_find_records_dt(self):
"""test finding records by date"""
hist = self.db.get_history()
middle = self.db.get_record(hist[len(hist)//2])
tic = middle['submitted']
before = self.db.find_records({'submitted' : {'$lt' : tic}})
after = self.db.find_records({'submitted' : {'$gte' : tic}})
self.assertEqual(len(before)+len(after),len(hist))
for b in before:
self.assertTrue(b['submitted'] < tic)
for a in after:
self.assertTrue(a['submitted'] >= tic)
same = self.db.find_records({'submitted' : tic})
for s in same:
self.assertTrue(s['submitted'] == tic)
def test_find_records_keys(self):
"""test extracting subset of record keys"""
found = self.db.find_records({'msg_id': {'$ne' : ''}},keys=['submitted', 'completed'])
for rec in found:
self.assertEqual(set(rec.keys()), set(['msg_id', 'submitted', 'completed']))
def test_find_records_msg_id(self):
"""ensure msg_id is always in found records"""
found = self.db.find_records({'msg_id': {'$ne' : ''}},keys=['submitted', 'completed'])
for rec in found:
self.assertTrue('msg_id' in rec.keys())
found = self.db.find_records({'msg_id': {'$ne' : ''}},keys=['submitted'])
for rec in found:
self.assertTrue('msg_id' in rec.keys())
found = self.db.find_records({'msg_id': {'$ne' : ''}},keys=['msg_id'])
for rec in found:
self.assertTrue('msg_id' in rec.keys())
def test_find_records_in(self):
"""test finding records with '$in','$nin' operators"""
hist = self.db.get_history()
even = hist[::2]
odd = hist[1::2]
recs = self.db.find_records({ 'msg_id' : {'$in' : even}})
found = [ r['msg_id'] for r in recs ]
self.assertEqual(set(even), set(found))
recs = self.db.find_records({ 'msg_id' : {'$nin' : even}})
found = [ r['msg_id'] for r in recs ]
self.assertEqual(set(odd), set(found))
def test_get_history(self):
msg_ids = self.db.get_history()
latest = datetime(1984,1,1)
for msg_id in msg_ids:
rec = self.db.get_record(msg_id)
newt = rec['submitted']
self.assertTrue(newt >= latest)
latest = newt
msg_id = self.load_records(1)[-1]
self.assertEqual(self.db.get_history()[-1],msg_id)
def test_datetime(self):
"""get/set timestamps with datetime objects"""
msg_id = self.db.get_history()[-1]
rec = self.db.get_record(msg_id)
self.assertTrue(isinstance(rec['submitted'], datetime))
self.db.update_record(msg_id, dict(completed=datetime.now()))
rec = self.db.get_record(msg_id)
self.assertTrue(isinstance(rec['completed'], datetime))
def test_drop_matching(self):
msg_ids = self.load_records(10)
query = {'msg_id' : {'$in':msg_ids}}
self.db.drop_matching_records(query)
recs = self.db.find_records(query)
self.assertEqual(len(recs), 0)
def test_null(self):
"""test None comparison queries"""
msg_ids = self.load_records(10)
query = {'msg_id' : None}
recs = self.db.find_records(query)
self.assertEqual(len(recs), 0)
query = {'msg_id' : {'$ne' : None}}
recs = self.db.find_records(query)
self.assertTrue(len(recs) >= 10)
def test_pop_safe_get(self):
"""editing query results shouldn't affect record [get]"""
msg_id = self.db.get_history()[-1]
rec = self.db.get_record(msg_id)
rec.pop('buffers')
rec['garbage'] = 'hello'
rec['header']['msg_id'] = 'fubar'
rec2 = self.db.get_record(msg_id)
self.assertTrue('buffers' in rec2)
self.assertFalse('garbage' in rec2)
self.assertEqual(rec2['header']['msg_id'], msg_id)
def test_pop_safe_find(self):
"""editing query results shouldn't affect record [find]"""
msg_id = self.db.get_history()[-1]
rec = self.db.find_records({'msg_id' : msg_id})[0]
rec.pop('buffers')
rec['garbage'] = 'hello'
rec['header']['msg_id'] = 'fubar'
rec2 = self.db.find_records({'msg_id' : msg_id})[0]
self.assertTrue('buffers' in rec2)
self.assertFalse('garbage' in rec2)
self.assertEqual(rec2['header']['msg_id'], msg_id)
def test_pop_safe_find_keys(self):
"""editing query results shouldn't affect record [find+keys]"""
msg_id = self.db.get_history()[-1]
rec = self.db.find_records({'msg_id' : msg_id}, keys=['buffers', 'header'])[0]
rec.pop('buffers')
rec['garbage'] = 'hello'
rec['header']['msg_id'] = 'fubar'
rec2 = self.db.find_records({'msg_id' : msg_id})[0]
self.assertTrue('buffers' in rec2)
self.assertFalse('garbage' in rec2)
self.assertEqual(rec2['header']['msg_id'], msg_id)
class TestDictBackend(TaskDBTest, TestCase):
def create_db(self):
return DictDB()
def test_cull_count(self):
self.db = self.create_db() # skip the load-records init from setUp
self.db.record_limit = 20
self.db.cull_fraction = 0.2
self.load_records(20)
self.assertEqual(len(self.db.get_history()), 20)
self.load_records(1)
# 0.2 * 20 = 4, 21 - 4 = 17
self.assertEqual(len(self.db.get_history()), 17)
self.load_records(3)
self.assertEqual(len(self.db.get_history()), 20)
self.load_records(1)
self.assertEqual(len(self.db.get_history()), 17)
for i in range(100):
self.load_records(1)
self.assertTrue(len(self.db.get_history()) >= 17)
self.assertTrue(len(self.db.get_history()) <= 20)
def test_cull_size(self):
self.db = self.create_db() # skip the load-records init from setUp
self.db.size_limit = 1000
self.db.cull_fraction = 0.2
self.load_records(100, buffer_size=10)
self.assertEqual(len(self.db.get_history()), 100)
self.load_records(1, buffer_size=0)
self.assertEqual(len(self.db.get_history()), 101)
self.load_records(1, buffer_size=1)
# 0.2 * 100 = 20, 101 - 20 = 81
self.assertEqual(len(self.db.get_history()), 81)
def test_cull_size_drop(self):
"""dropping records updates tracked buffer size"""
self.db = self.create_db() # skip the load-records init from setUp
self.db.size_limit = 1000
self.db.cull_fraction = 0.2
self.load_records(100, buffer_size=10)
self.assertEqual(len(self.db.get_history()), 100)
self.db.drop_record(self.db.get_history()[-1])
self.assertEqual(len(self.db.get_history()), 99)
self.load_records(1, buffer_size=5)
self.assertEqual(len(self.db.get_history()), 100)
self.load_records(1, buffer_size=5)
self.assertEqual(len(self.db.get_history()), 101)
self.load_records(1, buffer_size=1)
self.assertEqual(len(self.db.get_history()), 81)
def test_cull_size_update(self):
"""updating records updates tracked buffer size"""
self.db = self.create_db() # skip the load-records init from setUp
self.db.size_limit = 1000
self.db.cull_fraction = 0.2
self.load_records(100, buffer_size=10)
self.assertEqual(len(self.db.get_history()), 100)
msg_id = self.db.get_history()[-1]
self.db.update_record(msg_id, dict(result_buffers = [os.urandom(10)], buffers=[]))
self.assertEqual(len(self.db.get_history()), 100)
self.db.update_record(msg_id, dict(result_buffers = [os.urandom(11)], buffers=[]))
self.assertEqual(len(self.db.get_history()), 79)
class TestSQLiteBackend(TaskDBTest, TestCase):
@dec.skip_without('sqlite3')
def create_db(self):
location, fname = os.path.split(temp_db)
log = logging.getLogger('test')
log.setLevel(logging.CRITICAL)
return SQLiteDB(location=location, fname=fname, log=log)
def tearDown(self):
self.db._db.close()
def teardown():
"""cleanup task db file after all tests have run"""
try:
os.remove(temp_db)
except:
pass
| bsd-3-clause | -7,777,586,069,748,819,000 | 36.847134 | 94 | 0.565382 | false |
WeeFeeSuite/WeeFee | core/WeeMon.py | 1 | 2472 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import os
import sys
import time
# Add Parent Directory to path
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
from modules import devices
from modules import arguments
from modules import taskkill
__VERSION__ = "2.0.1"
# Moved reused code to WeeHelper
# Summary:
# Function to change card mode
# Args:
# interface -> String: name of card
# name -> String: new name of card
# mac -> String: new mac of card
# ch -> int: channel of card
def pymon(interface, name, mac, kill, ch=None):
'''
Function for Setting card mode and calling other functions;
author: Jarad
'''
mac_regex = "[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$"
if kill:
taskkill.kill_blocking_tasks()
card = devices.get_device(interface)
info = devices.get_info(card)
sinfo = devices.get_phy_info(card)
mode = devices.get_mode(interface)
print("Driver: {0}".format(info["driver"]))
print("Address: {0}".format(info["hwaddr"]))
print("Mode: {0}".format(mode))
print("Vendor: {0}".format(info["manufacturer"]))
if name:
name = name.decode('unicode_escape').encode('ascii', 'ignore')
devices.card_down(card)
if mode == "managed":
print("managed")
newcard = devices.set_monitor_mode(card, name)
elif mode == "monitor":
newcard = devices.set_managed_mode(card, name)
else:
print("Card mode unrecognized")
sys.exit(0)
if mac and re.match(mac_regex, mac.lower()):
try:
devices.set_mac(newcard, mac)
print("Mac: " + devices.get_mac(newcard) + "\n")
except:
print("Cannot Assign Requested Address.\n\n")
devices.card_up(newcard)
if ch:
devices.set_channel(newcard, int(ch))
print("Card: " + newcard.dev)
print("Mode: " + devices.get_mode(newcard.dev))
return
if __name__ == "__main__":
start = time.time()
arguments.root_check()
sys.stdout.write("[ WeeMon ]\n\n")
sys.stdout.write("Version: " + __VERSION__ + "\n\n")
results = arguments.args_parser("monitor")
pymon(
results['interface'],
results['name'],
results['mac'],
results['kill'],
results['channel'])
sys.stdout.write("\nTime: " + str(round(time.time() - start, 4)) + "\n")
| mit | -2,827,570,646,675,135,000 | 22.542857 | 76 | 0.586974 | false |
pombredanne/tahoe-lafs | src/allmydata/web/filenode.py | 7 | 21540 |
import simplejson
from twisted.web import http, static
from twisted.internet import defer
from nevow import url, rend
from nevow.inevow import IRequest
from allmydata.interfaces import ExistingChildError
from allmydata.monitor import Monitor
from allmydata.immutable.upload import FileHandle
from allmydata.mutable.publish import MutableFileHandle
from allmydata.mutable.common import MODE_READ
from allmydata.util import log, base32
from allmydata.util.encodingutil import quote_output
from allmydata.blacklist import FileProhibited, ProhibitedNode
from allmydata.web.common import text_plain, WebError, RenderMixin, \
boolean_of_arg, get_arg, should_create_intermediate_directories, \
MyExceptionHandler, parse_replace_arg, parse_offset_arg, \
get_format, get_mutable_type, get_filenode_metadata
from allmydata.web.check_results import CheckResultsRenderer, \
CheckAndRepairResultsRenderer, LiteralCheckResultsRenderer
from allmydata.web.info import MoreInfo
class ReplaceMeMixin:
def replace_me_with_a_child(self, req, client, replace):
# a new file is being uploaded in our place.
file_format = get_format(req, "CHK")
mutable_type = get_mutable_type(file_format)
if mutable_type is not None:
data = MutableFileHandle(req.content)
d = client.create_mutable_file(data, version=mutable_type)
def _uploaded(newnode):
d2 = self.parentnode.set_node(self.name, newnode,
overwrite=replace)
d2.addCallback(lambda res: newnode)
return d2
d.addCallback(_uploaded)
else:
assert file_format == "CHK"
uploadable = FileHandle(req.content, convergence=client.convergence)
d = self.parentnode.add_file(self.name, uploadable,
overwrite=replace)
def _done(filenode):
log.msg("webish upload complete",
facility="tahoe.webish", level=log.NOISY, umid="TCjBGQ")
if self.node:
# we've replaced an existing file (or modified a mutable
# file), so the response code is 200
req.setResponseCode(http.OK)
else:
# we've created a new file, so the code is 201
req.setResponseCode(http.CREATED)
return filenode.get_uri()
d.addCallback(_done)
return d
def replace_me_with_a_childcap(self, req, client, replace):
req.content.seek(0)
childcap = req.content.read()
childnode = client.create_node_from_uri(childcap, None, name=self.name)
d = self.parentnode.set_node(self.name, childnode, overwrite=replace)
d.addCallback(lambda res: childnode.get_uri())
return d
def replace_me_with_a_formpost(self, req, client, replace):
# create a new file, maybe mutable, maybe immutable
file_format = get_format(req, "CHK")
contents = req.fields["file"]
if file_format in ("SDMF", "MDMF"):
mutable_type = get_mutable_type(file_format)
uploadable = MutableFileHandle(contents.file)
d = client.create_mutable_file(uploadable, version=mutable_type)
def _uploaded(newnode):
d2 = self.parentnode.set_node(self.name, newnode,
overwrite=replace)
d2.addCallback(lambda res: newnode.get_uri())
return d2
d.addCallback(_uploaded)
return d
uploadable = FileHandle(contents.file, convergence=client.convergence)
d = self.parentnode.add_file(self.name, uploadable, overwrite=replace)
d.addCallback(lambda newnode: newnode.get_uri())
return d
class PlaceHolderNodeHandler(RenderMixin, rend.Page, ReplaceMeMixin):
def __init__(self, client, parentnode, name):
rend.Page.__init__(self)
self.client = client
assert parentnode
self.parentnode = parentnode
self.name = name
self.node = None
def render_PUT(self, ctx):
req = IRequest(ctx)
t = get_arg(req, "t", "").strip()
replace = parse_replace_arg(get_arg(req, "replace", "true"))
assert self.parentnode and self.name
if req.getHeader("content-range"):
raise WebError("Content-Range in PUT not yet supported",
http.NOT_IMPLEMENTED)
if not t:
return self.replace_me_with_a_child(req, self.client, replace)
if t == "uri":
return self.replace_me_with_a_childcap(req, self.client, replace)
raise WebError("PUT to a file: bad t=%s" % t)
def render_POST(self, ctx):
req = IRequest(ctx)
t = get_arg(req, "t", "").strip()
replace = boolean_of_arg(get_arg(req, "replace", "true"))
if t == "upload":
# like PUT, but get the file data from an HTML form's input field.
# We could get here from POST /uri/mutablefilecap?t=upload,
# or POST /uri/path/file?t=upload, or
# POST /uri/path/dir?t=upload&name=foo . All have the same
# behavior, we just ignore any name= argument
d = self.replace_me_with_a_formpost(req, self.client, replace)
else:
# t=mkdir is handled in DirectoryNodeHandler._POST_mkdir, so
# there are no other t= values left to be handled by the
# placeholder.
raise WebError("POST to a file: bad t=%s" % t)
when_done = get_arg(req, "when_done", None)
if when_done:
d.addCallback(lambda res: url.URL.fromString(when_done))
return d
class FileNodeHandler(RenderMixin, rend.Page, ReplaceMeMixin):
def __init__(self, client, node, parentnode=None, name=None):
rend.Page.__init__(self)
self.client = client
assert node
self.node = node
self.parentnode = parentnode
self.name = name
def childFactory(self, ctx, name):
req = IRequest(ctx)
if isinstance(self.node, ProhibitedNode):
raise FileProhibited(self.node.reason)
if should_create_intermediate_directories(req):
raise WebError("Cannot create directory %s, because its "
"parent is a file, not a directory" % quote_output(name, encoding='utf-8'))
raise WebError("Files have no children, certainly not named %s"
% quote_output(name, encoding='utf-8'))
def render_GET(self, ctx):
req = IRequest(ctx)
t = get_arg(req, "t", "").strip()
# t=info contains variable ophandles, so is not allowed an ETag.
FIXED_OUTPUT_TYPES = ["", "json", "uri", "readonly-uri"]
if not self.node.is_mutable() and t in FIXED_OUTPUT_TYPES:
# if the client already has the ETag then we can
# short-circuit the whole process.
si = self.node.get_storage_index()
if si and req.setETag('%s-%s' % (base32.b2a(si), t or "")):
return ""
if not t:
# just get the contents
# the filename arrives as part of the URL or in a form input
# element, and will be sent back in a Content-Disposition header.
# Different browsers use various character sets for this name,
# sometimes depending upon how language environment is
# configured. Firefox sends the equivalent of
# urllib.quote(name.encode("utf-8")), while IE7 sometimes does
# latin-1. Browsers cannot agree on how to interpret the name
# they see in the Content-Disposition header either, despite some
# 11-year old standards (RFC2231) that explain how to do it
# properly. So we assume that at least the browser will agree
# with itself, and echo back the same bytes that we were given.
filename = get_arg(req, "filename", self.name) or "unknown"
d = self.node.get_best_readable_version()
d.addCallback(lambda dn: FileDownloader(dn, filename))
return d
if t == "json":
# We do this to make sure that fields like size and
# mutable-type (which depend on the file on the grid and not
# just on the cap) are filled in. The latter gets used in
# tests, in particular.
#
# TODO: Make it so that the servermap knows how to update in
# a mode specifically designed to fill in these fields, and
# then update it in that mode.
if self.node.is_mutable():
d = self.node.get_servermap(MODE_READ)
else:
d = defer.succeed(None)
if self.parentnode and self.name:
d.addCallback(lambda ignored:
self.parentnode.get_metadata_for(self.name))
else:
d.addCallback(lambda ignored: None)
d.addCallback(lambda md: FileJSONMetadata(ctx, self.node, md))
return d
if t == "info":
return MoreInfo(self.node)
if t == "uri":
return FileURI(ctx, self.node)
if t == "readonly-uri":
return FileReadOnlyURI(ctx, self.node)
raise WebError("GET file: bad t=%s" % t)
def render_HEAD(self, ctx):
req = IRequest(ctx)
t = get_arg(req, "t", "").strip()
if t:
raise WebError("HEAD file: bad t=%s" % t)
filename = get_arg(req, "filename", self.name) or "unknown"
d = self.node.get_best_readable_version()
d.addCallback(lambda dn: FileDownloader(dn, filename))
return d
def render_PUT(self, ctx):
req = IRequest(ctx)
t = get_arg(req, "t", "").strip()
replace = parse_replace_arg(get_arg(req, "replace", "true"))
offset = parse_offset_arg(get_arg(req, "offset", None))
if not t:
if not replace:
# this is the early trap: if someone else modifies the
# directory while we're uploading, the add_file(overwrite=)
# call in replace_me_with_a_child will do the late trap.
raise ExistingChildError()
if self.node.is_mutable():
# Are we a readonly filenode? We shouldn't allow callers
# to try to replace us if we are.
if self.node.is_readonly():
raise WebError("PUT to a mutable file: replace or update"
" requested with read-only cap")
if offset is None:
return self.replace_my_contents(req)
if offset >= 0:
return self.update_my_contents(req, offset)
raise WebError("PUT to a mutable file: Invalid offset")
else:
if offset is not None:
raise WebError("PUT to a file: append operation invoked "
"on an immutable cap")
assert self.parentnode and self.name
return self.replace_me_with_a_child(req, self.client, replace)
if t == "uri":
if not replace:
raise ExistingChildError()
assert self.parentnode and self.name
return self.replace_me_with_a_childcap(req, self.client, replace)
raise WebError("PUT to a file: bad t=%s" % t)
def render_POST(self, ctx):
req = IRequest(ctx)
t = get_arg(req, "t", "").strip()
replace = boolean_of_arg(get_arg(req, "replace", "true"))
if t == "check":
d = self._POST_check(req)
elif t == "upload":
# like PUT, but get the file data from an HTML form's input field
# We could get here from POST /uri/mutablefilecap?t=upload,
# or POST /uri/path/file?t=upload, or
# POST /uri/path/dir?t=upload&name=foo . All have the same
# behavior, we just ignore any name= argument
if self.node.is_mutable():
d = self.replace_my_contents_with_a_formpost(req)
else:
if not replace:
raise ExistingChildError()
assert self.parentnode and self.name
d = self.replace_me_with_a_formpost(req, self.client, replace)
else:
raise WebError("POST to file: bad t=%s" % t)
when_done = get_arg(req, "when_done", None)
if when_done:
d.addCallback(lambda res: url.URL.fromString(when_done))
return d
def _maybe_literal(self, res, Results_Class):
if res:
return Results_Class(self.client, res)
return LiteralCheckResultsRenderer(self.client)
def _POST_check(self, req):
verify = boolean_of_arg(get_arg(req, "verify", "false"))
repair = boolean_of_arg(get_arg(req, "repair", "false"))
add_lease = boolean_of_arg(get_arg(req, "add-lease", "false"))
if repair:
d = self.node.check_and_repair(Monitor(), verify, add_lease)
d.addCallback(self._maybe_literal, CheckAndRepairResultsRenderer)
else:
d = self.node.check(Monitor(), verify, add_lease)
d.addCallback(self._maybe_literal, CheckResultsRenderer)
return d
def render_DELETE(self, ctx):
assert self.parentnode and self.name
d = self.parentnode.delete(self.name)
d.addCallback(lambda res: self.node.get_uri())
return d
def replace_my_contents(self, req):
req.content.seek(0)
new_contents = MutableFileHandle(req.content)
d = self.node.overwrite(new_contents)
d.addCallback(lambda res: self.node.get_uri())
return d
def update_my_contents(self, req, offset):
req.content.seek(0)
added_contents = MutableFileHandle(req.content)
d = self.node.get_best_mutable_version()
d.addCallback(lambda mv:
mv.update(added_contents, offset))
d.addCallback(lambda ignored:
self.node.get_uri())
return d
def replace_my_contents_with_a_formpost(self, req):
# we have a mutable file. Get the data from the formpost, and replace
# the mutable file's contents with it.
new_contents = req.fields['file']
new_contents = MutableFileHandle(new_contents.file)
d = self.node.overwrite(new_contents)
d.addCallback(lambda res: self.node.get_uri())
return d
class FileDownloader(rend.Page):
def __init__(self, filenode, filename):
rend.Page.__init__(self)
self.filenode = filenode
self.filename = filename
def parse_range_header(self, range):
        # Parse a byte range header according to RFC 2616 "14.35.1 Byte
        # Ranges". Returns None if the range doesn't make sense so it
# can be ignored (per the spec). When successful, returns a
# list of (first,last) inclusive range tuples.
filesize = self.filenode.get_size()
assert isinstance(filesize, (int,long)), filesize
try:
# byte-ranges-specifier
units, rangeset = range.split('=', 1)
if units != 'bytes':
return None # nothing else supported
def parse_range(r):
first, last = r.split('-', 1)
                if first == '':
# suffix-byte-range-spec
first = filesize - long(last)
last = filesize - 1
else:
# byte-range-spec
# first-byte-pos
first = long(first)
# last-byte-pos
                    if last == '':
last = filesize - 1
else:
last = long(last)
if last < first:
raise ValueError
return (first, last)
# byte-range-set
#
# Note: the spec uses "1#" for the list of ranges, which
# implicitly allows whitespace around the ',' separators,
# so strip it.
return [ parse_range(r.strip()) for r in rangeset.split(',') ]
except ValueError:
return None
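    # Illustrative results, assuming a 1000-byte file (not exhaustive):
    #   "bytes=0-499"  -> [(0, 499)]
    #   "bytes=-500"   -> [(500, 999)]  (suffix-byte-range-spec)
    #   "bytes=500-"   -> [(500, 999)]
    #   "chars=0-499"  -> None          (only 'bytes' units are supported)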
def renderHTTP(self, ctx):
req = IRequest(ctx)
gte = static.getTypeAndEncoding
ctype, encoding = gte(self.filename,
static.File.contentTypes,
static.File.contentEncodings,
defaultType="text/plain")
req.setHeader("content-type", ctype)
if encoding:
req.setHeader("content-encoding", encoding)
if boolean_of_arg(get_arg(req, "save", "False")):
            # tell the browser to save the file rather than display it. We
            # don't try to encode the filename; instead we echo back the
            # exact same bytes we were given in the URL. See the comment in
# FileNodeHandler.render_GET for the sad details.
req.setHeader("content-disposition",
'attachment; filename="%s"' % self.filename)
filesize = self.filenode.get_size()
assert isinstance(filesize, (int,long)), filesize
first, size = 0, None
contentsize = filesize
req.setHeader("accept-ranges", "bytes")
# TODO: for mutable files, use the roothash. For LIT, hash the data.
# or maybe just use the URI for CHK and LIT.
rangeheader = req.getHeader('range')
if rangeheader:
ranges = self.parse_range_header(rangeheader)
# ranges = None means the header didn't parse, so ignore
            # the header as if it didn't exist. If there is more than one
            # range, then just return the first for now, until we can
# generate multipart/byteranges.
if ranges is not None:
first, last = ranges[0]
if first >= filesize:
raise WebError('First beyond end of file',
http.REQUESTED_RANGE_NOT_SATISFIABLE)
else:
first = max(0, first)
last = min(filesize-1, last)
req.setResponseCode(http.PARTIAL_CONTENT)
req.setHeader('content-range',"bytes %s-%s/%s" %
(str(first), str(last),
str(filesize)))
contentsize = last - first + 1
size = contentsize
req.setHeader("content-length", b"%d" % contentsize)
if req.method == "HEAD":
return ""
finished = []
def _request_finished(ign):
finished.append(True)
req.notifyFinish().addBoth(_request_finished)
d = self.filenode.read(req, first, size)
def _finished(ign):
if not finished:
req.finish()
def _error(f):
lp = log.msg("error during GET", facility="tahoe.webish", failure=f,
level=log.UNUSUAL, umid="xSiF3w")
if finished:
log.msg("but it's too late to tell them", parent=lp,
level=log.UNUSUAL, umid="j1xIbw")
return
req._tahoe_request_had_error = f # for HTTP-style logging
if req.startedWriting:
# The content-type is already set, and the response code has
# already been sent, so we can't provide a clean error
# indication. We can emit text (which a browser might
# interpret as something else), and if we sent a Size header,
# they might notice that we've truncated the data. Keep the
# error message small to improve the chances of having our
# error response be shorter than the intended results.
#
# We don't have a lot of options, unfortunately.
req.write("problem during download\n")
req.finish()
else:
# We haven't written anything yet, so we can provide a
# sensible error message.
eh = MyExceptionHandler()
eh.renderHTTP_exception(ctx, f)
d.addCallbacks(_finished, _error)
return req.deferred
def FileJSONMetadata(ctx, filenode, edge_metadata):
rw_uri = filenode.get_write_uri()
ro_uri = filenode.get_readonly_uri()
data = ("filenode", get_filenode_metadata(filenode))
if ro_uri:
data[1]['ro_uri'] = ro_uri
if rw_uri:
data[1]['rw_uri'] = rw_uri
verifycap = filenode.get_verify_cap()
if verifycap:
data[1]['verify_uri'] = verifycap.to_string()
if edge_metadata is not None:
data[1]['metadata'] = edge_metadata
return text_plain(simplejson.dumps(data, indent=1) + "\n", ctx)
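# Example body produced above (a sketch; the exact fields vary with the node
# type and available caps):
#   ["filenode", {"ro_uri": "URI:...", "verify_uri": "URI:...",
#                 "metadata": {...}}]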
def FileURI(ctx, filenode):
return text_plain(filenode.get_uri(), ctx)
def FileReadOnlyURI(ctx, filenode):
if filenode.is_readonly():
return text_plain(filenode.get_uri(), ctx)
return text_plain(filenode.get_readonly_uri(), ctx)
class FileNodeDownloadHandler(FileNodeHandler):
def childFactory(self, ctx, name):
return FileNodeDownloadHandler(self.client, self.node, name=name)
| gpl-2.0 | 1,709,505,461,712,602,400 | 40.10687 | 102 | 0.571681 | false |
hgl888/chromium-crosswalk-efl | tools/perf/page_sets/top_10.py | 33 | 4115 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class SimpleScrollPage(page_module.Page):
def __init__(self, url, page_set, credentials='', name=''):
super(SimpleScrollPage, self).__init__(url, page_set=page_set, name=name)
self.credentials = credentials
def RunSmoothness(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
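  # The Begin/End pair above brackets the scroll gesture so Telemetry can
  # attribute smoothness metrics to a labelled interaction (a note on
  # intent, not API documentation).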
class Google(SimpleScrollPage):
def __init__(self, page_set):
super(Google, self).__init__(
url='https://www.google.com/#hl=en&q=barack+obama', page_set=page_set)
def RunNavigateSteps(self, action_runner):
super(Google, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='Next')
class Gmail(SimpleScrollPage):
def __init__(self, page_set):
super(Gmail, self).__init__(
url='https://mail.google.com/mail/',
page_set=page_set,
credentials='google')
def RunNavigateSteps(self, action_runner):
super(Gmail, self).RunNavigateSteps(action_runner)
action_runner.WaitForJavaScriptCondition(
'window.gmonkey !== undefined &&'
'document.getElementById("gb") !== null')
class GoogleCalendar(SimpleScrollPage):
def __init__(self, page_set):
super(GoogleCalendar, self).__init__(
url='https://www.google.com/calendar/',
page_set=page_set,
credentials='google')
def RunNavigateSteps(self, action_runner):
super(GoogleCalendar, self).RunNavigateSteps(action_runner)
action_runner.ExecuteJavaScript('''
(function() { var elem = document.createElement("meta");
elem.name="viewport";
elem.content="initial-scale=1";
document.body.appendChild(elem);
})();''')
action_runner.Wait(2)
action_runner.WaitForElement('div[class~="navForward"]')
class Youtube(SimpleScrollPage):
def __init__(self, page_set):
super(Youtube, self).__init__(
url='http://www.youtube.com',
page_set=page_set,
credentials='google')
def RunNavigateSteps(self, action_runner):
super(Youtube, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
class Facebook(SimpleScrollPage):
def __init__(self, page_set):
super(Facebook, self).__init__(
url='http://www.facebook.com/barackobama',
page_set=page_set,
credentials='facebook',
name='Facebook')
def RunNavigateSteps(self, action_runner):
super(Facebook, self).RunNavigateSteps(action_runner)
action_runner.WaitForElement(text='About')
class Top10PageSet(page_set_module.PageSet):
"""10 Pages chosen from Alexa top sites"""
def __init__(self):
super(Top10PageSet, self).__init__(
archive_data_file='data/top_10.json',
credentials_path='data/credentials.json',
user_agent_type='desktop',
bucket=page_set_module.PARTNER_BUCKET)
# top google property; a google tab is often open
self.AddPage(Google(self))
# productivity, top google properties
# TODO(dominikg): fix crbug.com/386152
#self.AddPage(Gmail(self))
# productivity, top google properties
self.AddPage(GoogleCalendar(self))
# #3 (Alexa global)
self.AddPage(Youtube(self))
# top social, Public profile
self.AddPage(Facebook(self))
    # #6 (Alexa) most visited worldwide; picked an interesting page
self.AddPage(SimpleScrollPage('http://en.wikipedia.org/wiki/Wikipedia',
self, name='Wikipedia'))
# #1 world commerce website by visits; #3 commerce in the US by time spent
self.AddPage(SimpleScrollPage('http://www.amazon.com', self))
# #4 Alexa
self.AddPage(SimpleScrollPage('http://www.yahoo.com/', self))
# #16 Alexa
self.AddPage(SimpleScrollPage('http://www.bing.com/', self))
# #20 Alexa
self.AddPage(SimpleScrollPage('http://www.ask.com/', self))
| bsd-3-clause | 2,987,099,994,312,314,400 | 31.401575 | 78 | 0.675334 | false |
alpeware/gcms | common.py | 1 | 5675 | """
GCMS
(c) 2017 Alpeware LLC
"""
import logging
import re
import string
from google.appengine.api import taskqueue
from google.appengine.api import memcache
TAGS_RE = re.compile('(<p[^>]+>)<span([^>]*)>[Tt]ags:([^<]+)</span></p>')
TITLE_RE = re.compile('<p class="title"([^>]+)><span([^>]+)>([^<]+)')
COMMENTS_RE = re.compile('(<div\sstyle="border:1px[^"]+">)')
IMAGES_RE = re.compile('(<span[^>]+><img[^>]+></span>)')
HEAD_RE = '</head>'
BODY_RE = '<body\s(style="background-color:[^;]+;)[^>]+>'
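# TAGS_RE example (illustrative): '<p class="c0"><span>Tags: a, b</span></p>'
# yields group 1 = '<p class="c0">', group 2 = '' (the span attributes), and
# group 3 = ' a, b'.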
VIEWPORT = '<meta name="viewport" content="width=device-width, initial-scale=1">'
CUSTOM_CSS = '<style>img { width: 100% }</style>';
DISQUS_SCRIPT = """
<div id="disqus_thread"></div>
<script>
var disqus_config = function () {
this.page.url = 'https://www.alpeware.com/%s';
this.page.identifier = '/%s';
};
(function() { // DON'T EDIT BELOW THIS LINE
var d = document, s = d.createElement('script');
s.src = 'https://www-alpeware-com.disqus.com/embed.js';
s.setAttribute('data-timestamp', +new Date());
(d.head || d.body).appendChild(s);
})();
</script>
<noscript>Please enable JavaScript to view the <a href="https://disqus.com/?ref_noscript">comments powered by Disqus.</a></noscript>
"""
ANALYTICS_SCRIPT = """
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-91886762-1', 'auto');
ga('send', 'pageview');
</script>
"""
ADSENSE_SCRIPT = """
<script async src="//pagead2.googlesyndication.com/pagead/js/adsbygoogle.js"></script>
<!-- Alpeware -->
<ins class="adsbygoogle"
style="display:block"
data-ad-client="ca-pub-6123540793627831"
data-ad-slot="8549779304"
data-ad-format="auto"></ins>
<script>
(adsbygoogle = window.adsbygoogle || []).push({});
</script>
"""
ADSENSE_PAGE_ADS_SCRIPT = """
<script async src="//pagead2.googlesyndication.com/pagead/js/adsbygoogle.js"></script>
<script>
(adsbygoogle = window.adsbygoogle || []).push({
google_ad_client: "ca-pub-6123540793627831",
enable_page_level_ads: true
});
</script>
"""
POST_TMPL_RE = re.compile('<p[^>]*><span[^>]*>//--\+ Post</span></p>(.*)<p[^>]*><span[^>]*>Post \+--//</span></p>')
def make_resp(match):
s = match.group(0)
s = string.replace(s, 'display: inline-block;', '')
s = string.replace(s, 'width:', 'max-width:')
s = string.replace(s, 'height:', 'max-height:')
return s
# TODO: consolidate and DRY both parsing methods
def parse_landing_page(html, pages):
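    """Prepare exported landing-page HTML and expand the post template.

    Returns a (title, tags, html) tuple, like fix_page below.
    """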
title = 'Alpeware'
tags = ['landing page']
title_tag = '<title>' + title + '</title>'
fixed_head = re.sub(HEAD_RE, title_tag + VIEWPORT + CUSTOM_CSS + HEAD_RE, html)
html = re.sub(BODY_RE, r'<body style="background-color:#f3f3f3;"><div \1max-width:80%;margin-left:auto;margin-right:auto;margin-top:10px;padding:20px;">', fixed_head)
html = re.sub(IMAGES_RE, make_resp, html)
html = re.sub('</body>', ANALYTICS_SCRIPT + '</body>', html)
logging.debug('processing landing page')
post_tmpl = ''
post_section = ''
tmpl_match = re.search(POST_TMPL_RE, html)
if tmpl_match:
post_tmpl = tmpl_match.group(1)
post_tmpl = string.replace(post_tmpl, '{post.slug}', "<a href='{post.slug}'>{post.title}</a>")
for page in pages:
page_dict = page.to_dict()
if not page_dict['slug'].startswith('0-'):
p = post_tmpl
for k in page._properties:
if isinstance(page_dict[k], basestring):
p = string.replace(p, ("{post.%s}" % k), page_dict[k])
post_section += p
with_section = re.sub(POST_TMPL_RE, post_section, html)
return (title, tags, with_section)
return (title, tags, html)
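# Template note (illustrative): a fragment between the "//--+ Post" and
# "Post +--//" markers, e.g. "<p>{post.title}</p>", is repeated once per
# post whose slug does not start with "0-", with each {post.<field>}
# placeholder filled from the page entity; {post.slug} becomes a titled link.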
def fix_page(html, slug):
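    """Post-process an exported post page; return (title, tags, html)."""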
title = 'Blog'
tags = []
tags_links = ''
tags_match = re.search(TAGS_RE, html)
if tags_match:
tags = [i.strip() for i in tags_match.group(3).split(',')]
tags_links = tags_match.group(1) + 'Tags: ' + ', '.join(["<a href='/tags/%s'>%s</a>" % (i, i) for i in tags]) + '</p>'
no_tags = re.sub(TAGS_RE, tags_links, html)
title_match = re.search(TITLE_RE, no_tags)
if title_match:
title = title_match.group(3)
title_tag = '<title>' + title + '</title>'
resp_imgs = re.sub(IMAGES_RE, make_resp, no_tags)
def style_comms(match):
s = match.group(0)
s = string.replace(s, 'border:1px', '')
return s
styled_comms = re.sub(COMMENTS_RE, style_comms, resp_imgs)
add_comments = re.sub('</body>', ADSENSE_SCRIPT + ANALYTICS_SCRIPT + (DISQUS_SCRIPT % (slug, slug)) + '</body>', styled_comms)
fixed_head = re.sub(HEAD_RE, title_tag + VIEWPORT + CUSTOM_CSS + ADSENSE_PAGE_ADS_SCRIPT + HEAD_RE, add_comments)
fixed_body = re.sub(BODY_RE, r'<body style="background-color:#f3f3f3;"><div \1max-width:80%;margin-left:auto;margin-right:auto;margin-top:10px;padding:20px;">', fixed_head)
return (title, tags, fixed_body)
def enqueue_post(file_id):
queue = taskqueue.Queue(name='post-queue')
task = taskqueue.Task(
url='/worker/post',
target='worker',
params={'file_id': file_id})
rpc = queue.add_async(task)
task = rpc.get_result()
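# Note: add_async(...).get_result() waits for the enqueue RPC to complete,
# so both helpers behave like synchronous task adds.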
def start_caching():
queue = taskqueue.Queue(name='index-queue')
task = taskqueue.Task(
url='/worker/index',
target='worker')
rpc = queue.add_async(task)
task = rpc.get_result()
| mit | 4,333,174,895,365,929,500 | 34.46875 | 176 | 0.606872 | false |
jumpstarter-io/neutron | neutron/tests/unit/vmware/test_nsx_sync.py | 9 | 33065 | # Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
import time
import mock
from oslo.config import cfg
from neutron.api.v2 import attributes as attr
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.extensions import l3
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import client
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.api_client import version
from neutron.plugins.vmware.common import sync
from neutron.plugins.vmware.dbexts import db
from neutron.plugins.vmware import nsx_cluster as cluster
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware import plugin
from neutron.tests import base
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import testlib_api
from neutron.tests.unit import vmware
from neutron.tests.unit.vmware.apiclient import fake
LOG = log.getLogger(__name__)
_uuid = test_api_v2._uuid
LSWITCHES = [{'uuid': _uuid(), 'name': 'ls-1'},
{'uuid': _uuid(), 'name': 'ls-2'}]
LSWITCHPORTS = [{'uuid': _uuid(), 'name': 'lp-1'},
{'uuid': _uuid(), 'name': 'lp-2'}]
LROUTERS = [{'uuid': _uuid(), 'name': 'lr-1'},
{'uuid': _uuid(), 'name': 'lr-2'}]
class CacheTestCase(base.BaseTestCase):
"""Test suite providing coverage for the Cache class."""
def setUp(self):
self.nsx_cache = sync.NsxCache()
for lswitch in LSWITCHES:
self.nsx_cache._uuid_dict_mappings[lswitch['uuid']] = (
self.nsx_cache._lswitches)
self.nsx_cache._lswitches[lswitch['uuid']] = (
{'data': lswitch,
'hash': hash(jsonutils.dumps(lswitch))})
for lswitchport in LSWITCHPORTS:
self.nsx_cache._uuid_dict_mappings[lswitchport['uuid']] = (
self.nsx_cache._lswitchports)
self.nsx_cache._lswitchports[lswitchport['uuid']] = (
{'data': lswitchport,
'hash': hash(jsonutils.dumps(lswitchport))})
for lrouter in LROUTERS:
self.nsx_cache._uuid_dict_mappings[lrouter['uuid']] = (
self.nsx_cache._lrouters)
self.nsx_cache._lrouters[lrouter['uuid']] = (
{'data': lrouter,
'hash': hash(jsonutils.dumps(lrouter))})
super(CacheTestCase, self).setUp()
def test_get_lswitches(self):
ls_uuids = self.nsx_cache.get_lswitches()
self.assertEqual(set(ls_uuids),
set([ls['uuid'] for ls in LSWITCHES]))
def test_get_lswitchports(self):
lp_uuids = self.nsx_cache.get_lswitchports()
self.assertEqual(set(lp_uuids),
set([lp['uuid'] for lp in LSWITCHPORTS]))
def test_get_lrouters(self):
lr_uuids = self.nsx_cache.get_lrouters()
self.assertEqual(set(lr_uuids),
set([lr['uuid'] for lr in LROUTERS]))
def test_get_lswitches_changed_only(self):
ls_uuids = self.nsx_cache.get_lswitches(changed_only=True)
self.assertEqual(0, len(ls_uuids))
def test_get_lswitchports_changed_only(self):
lp_uuids = self.nsx_cache.get_lswitchports(changed_only=True)
self.assertEqual(0, len(lp_uuids))
def test_get_lrouters_changed_only(self):
lr_uuids = self.nsx_cache.get_lrouters(changed_only=True)
self.assertEqual(0, len(lr_uuids))
def _verify_update(self, new_resource, changed=True, hit=True):
cached_resource = self.nsx_cache[new_resource['uuid']]
self.assertEqual(new_resource, cached_resource['data'])
self.assertEqual(hit, cached_resource.get('hit', False))
self.assertEqual(changed,
cached_resource.get('changed', False))
def test_update_lswitch_new_item(self):
new_switch_uuid = _uuid()
new_switch = {'uuid': new_switch_uuid, 'name': 'new_switch'}
self.nsx_cache.update_lswitch(new_switch)
self.assertIn(new_switch_uuid, self.nsx_cache._lswitches.keys())
self._verify_update(new_switch)
def test_update_lswitch_existing_item(self):
switch = LSWITCHES[0]
switch['name'] = 'new_name'
self.nsx_cache.update_lswitch(switch)
self.assertIn(switch['uuid'], self.nsx_cache._lswitches.keys())
self._verify_update(switch)
def test_update_lswitchport_new_item(self):
new_switchport_uuid = _uuid()
new_switchport = {'uuid': new_switchport_uuid,
'name': 'new_switchport'}
self.nsx_cache.update_lswitchport(new_switchport)
self.assertIn(new_switchport_uuid,
self.nsx_cache._lswitchports.keys())
self._verify_update(new_switchport)
def test_update_lswitchport_existing_item(self):
switchport = LSWITCHPORTS[0]
switchport['name'] = 'new_name'
self.nsx_cache.update_lswitchport(switchport)
self.assertIn(switchport['uuid'],
self.nsx_cache._lswitchports.keys())
self._verify_update(switchport)
def test_update_lrouter_new_item(self):
new_router_uuid = _uuid()
new_router = {'uuid': new_router_uuid,
'name': 'new_router'}
self.nsx_cache.update_lrouter(new_router)
self.assertIn(new_router_uuid,
self.nsx_cache._lrouters.keys())
self._verify_update(new_router)
def test_update_lrouter_existing_item(self):
router = LROUTERS[0]
router['name'] = 'new_name'
self.nsx_cache.update_lrouter(router)
self.assertIn(router['uuid'],
self.nsx_cache._lrouters.keys())
self._verify_update(router)
def test_process_updates_initial(self):
# Clear cache content to simulate first-time filling
self.nsx_cache._lswitches.clear()
self.nsx_cache._lswitchports.clear()
self.nsx_cache._lrouters.clear()
self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS)
for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
self._verify_update(resource)
def test_process_updates_no_change(self):
self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS)
for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
self._verify_update(resource, changed=False)
def test_process_updates_with_changes(self):
LSWITCHES[0]['name'] = 'altered'
self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS)
for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
changed = (True if resource['uuid'] == LSWITCHES[0]['uuid']
else False)
self._verify_update(resource, changed=changed)
def _test_process_updates_with_removals(self):
lswitches = LSWITCHES[:]
lswitch = lswitches.pop()
self.nsx_cache.process_updates(lswitches, LROUTERS, LSWITCHPORTS)
for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
hit = (False if resource['uuid'] == lswitch['uuid']
else True)
self._verify_update(resource, changed=False, hit=hit)
return (lswitch, lswitches)
def test_process_updates_with_removals(self):
self._test_process_updates_with_removals()
def test_process_updates_cleanup_after_delete(self):
deleted_lswitch, lswitches = self._test_process_updates_with_removals()
self.nsx_cache.process_deletes()
self.nsx_cache.process_updates(lswitches, LROUTERS, LSWITCHPORTS)
self.assertNotIn(deleted_lswitch['uuid'], self.nsx_cache._lswitches)
def test_update_resource_does_not_cleanup_deleted_resources(self):
deleted_lswitch, lswitches = self._test_process_updates_with_removals()
self.nsx_cache.process_deletes()
self.nsx_cache.update_lswitch(deleted_lswitch)
self.assertIn(deleted_lswitch['uuid'], self.nsx_cache._lswitches)
def _verify_delete(self, resource, deleted=True, hit=True):
cached_resource = self.nsx_cache[resource['uuid']]
data_field = 'data_bk' if deleted else 'data'
self.assertEqual(resource, cached_resource[data_field])
self.assertEqual(hit, cached_resource.get('hit', False))
self.assertEqual(deleted,
cached_resource.get('changed', False))
def _set_hit(self, resources, uuid_to_delete=None):
for resource in resources:
if resource['data']['uuid'] != uuid_to_delete:
resource['hit'] = True
def test_process_deletes_no_change(self):
# Mark all resources as hit
self._set_hit(self.nsx_cache._lswitches.values())
self._set_hit(self.nsx_cache._lswitchports.values())
self._set_hit(self.nsx_cache._lrouters.values())
self.nsx_cache.process_deletes()
for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
self._verify_delete(resource, hit=False, deleted=False)
def test_process_deletes_with_removals(self):
# Mark all resources but one as hit
uuid_to_delete = LSWITCHPORTS[0]['uuid']
self._set_hit(self.nsx_cache._lswitches.values(),
uuid_to_delete)
self._set_hit(self.nsx_cache._lswitchports.values(),
uuid_to_delete)
self._set_hit(self.nsx_cache._lrouters.values(),
uuid_to_delete)
self.nsx_cache.process_deletes()
for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
deleted = resource['uuid'] == uuid_to_delete
self._verify_delete(resource, hit=False, deleted=deleted)
class SyncLoopingCallTestCase(base.BaseTestCase):
def test_looping_calls(self):
# Avoid runs of the synchronization process - just start
# the looping call
with mock.patch.object(
sync.NsxSynchronizer, '_synchronize_state', return_value=0.01):
synchronizer = sync.NsxSynchronizer(mock.ANY, mock.ANY,
100, 0, 0)
time.sleep(0.03)
# stop looping call before asserting
synchronizer._sync_looping_call.stop()
# Just verify the looping call has been called, trying
# to assess the exact number of calls would be unreliable
self.assertTrue(synchronizer._synchronize_state.call_count)
class SyncTestCase(testlib_api.SqlTestCase):
def setUp(self):
# mock api client
self.fc = fake.FakeClient(vmware.STUBS_PATH)
mock_api = mock.patch(vmware.NSXAPI_NAME, autospec=True)
# Avoid runs of the synchronizer looping call
        # These unit tests will explicitly invoke synchronization
patch_sync = mock.patch.object(sync, '_start_loopingcall')
self.mock_api = mock_api.start()
patch_sync.start()
self.mock_api.return_value.login.return_value = "the_cookie"
# Emulate tests against NSX 3.x
self.mock_api.return_value.get_version.return_value = (
version.Version("3.1"))
self.mock_api.return_value.request.side_effect = self.fc.fake_request
self.fake_cluster = cluster.NSXCluster(
name='fake-cluster', nsx_controllers=['1.1.1.1:999'],
default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar')
self.fake_cluster.api_client = client.NsxApiClient(
('1.1.1.1', '999', True),
self.fake_cluster.nsx_user, self.fake_cluster.nsx_password,
http_timeout=self.fake_cluster.http_timeout,
retries=self.fake_cluster.retries,
redirects=self.fake_cluster.redirects)
# Instantiate Neutron plugin
# and setup needed config variables
args = ['--config-file', vmware.get_fake_conf('neutron.conf.test'),
'--config-file', vmware.get_fake_conf('nsx.ini.test')]
self.config_parse(args=args)
cfg.CONF.set_override('allow_overlapping_ips', True)
self._plugin = plugin.NsxPlugin()
# Mock neutron manager plugin load functions to speed up tests
mock_nm_get_plugin = mock.patch('neutron.manager.NeutronManager.'
'get_plugin')
mock_nm_get_service_plugins = mock.patch(
'neutron.manager.NeutronManager.get_service_plugins')
self.mock_nm_get_plugin = mock_nm_get_plugin.start()
self.mock_nm_get_plugin.return_value = self._plugin
mock_nm_get_service_plugins.start()
super(SyncTestCase, self).setUp()
self.addCleanup(self.fc.reset_all)
@contextlib.contextmanager
def _populate_data(self, ctx, net_size=2, port_size=2, router_size=2):
def network(idx):
return {'network': {'name': 'net-%s' % idx,
'admin_state_up': True,
'shared': False,
'port_security_enabled': True,
'tenant_id': 'foo'}}
def subnet(idx, net_id):
return {'subnet':
{'cidr': '10.10.%s.0/24' % idx,
'name': 'sub-%s' % idx,
'gateway_ip': attr.ATTR_NOT_SPECIFIED,
'allocation_pools': attr.ATTR_NOT_SPECIFIED,
'ip_version': 4,
'dns_nameservers': attr.ATTR_NOT_SPECIFIED,
'host_routes': attr.ATTR_NOT_SPECIFIED,
'enable_dhcp': True,
'network_id': net_id,
'tenant_id': 'foo'}}
def port(idx, net_id):
return {'port': {'network_id': net_id,
'name': 'port-%s' % idx,
'admin_state_up': True,
'device_id': 'miao',
'device_owner': 'bau',
'fixed_ips': attr.ATTR_NOT_SPECIFIED,
'mac_address': attr.ATTR_NOT_SPECIFIED,
'tenant_id': 'foo'}}
def router(idx):
# Use random uuids as names
return {'router': {'name': 'rtr-%s' % idx,
'admin_state_up': True,
'tenant_id': 'foo'}}
networks = []
ports = []
routers = []
for i in range(net_size):
net = self._plugin.create_network(ctx, network(i))
networks.append(net)
self._plugin.create_subnet(ctx, subnet(i, net['id']))
for j in range(port_size):
ports.append(self._plugin.create_port(
ctx, port("%s-%s" % (i, j), net['id'])))
for i in range(router_size):
routers.append(self._plugin.create_router(ctx, router(i)))
        # Do not return anything as the user does not need the actual
        # data created
yield
# Remove everything
for router in routers:
self._plugin.delete_router(ctx, router['id'])
for port in ports:
self._plugin.delete_port(ctx, port['id'])
# This will remove networks and subnets
for network in networks:
self._plugin.delete_network(ctx, network['id'])
def _get_tag_dict(self, tags):
return dict((tag['scope'], tag['tag']) for tag in tags)
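    # e.g. (illustrative) [{'scope': 'q_port_id', 'tag': 'abc'}] maps to
    # {'q_port_id': 'abc'}, matching the fake NSX tag format used below.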
def _test_sync(self, exp_net_status,
exp_port_status, exp_router_status,
action_callback=None, sp=None):
ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
neutron_net_id = self._get_tag_dict(
self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0]
neutron_port_id = self._get_tag_dict(
self.fc._fake_lswitch_lport_dict[lp_uuid]['tags'])['q_port_id']
lr_uuid = self.fc._fake_lrouter_dict.keys()[0]
neutron_rtr_id = self._get_tag_dict(
self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
if action_callback:
action_callback(ls_uuid, lp_uuid, lr_uuid)
# Make chunk big enough to read everything
if not sp:
sp = sync.SyncParameters(100)
self._plugin._synchronizer._synchronize_state(sp)
# Verify element is in expected status
# TODO(salv-orlando): Verify status for all elements
ctx = context.get_admin_context()
neutron_net = self._plugin.get_network(ctx, neutron_net_id)
neutron_port = self._plugin.get_port(ctx, neutron_port_id)
neutron_rtr = self._plugin.get_router(ctx, neutron_rtr_id)
self.assertEqual(exp_net_status, neutron_net['status'])
self.assertEqual(exp_port_status, neutron_port['status'])
self.assertEqual(exp_router_status, neutron_rtr['status'])
def _action_callback_status_down(self, ls_uuid, lp_uuid, lr_uuid):
self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
self.fc._fake_lswitch_lport_dict[lp_uuid]['status'] = 'false'
self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
def test_initial_sync(self):
ctx = context.get_admin_context()
with self._populate_data(ctx):
self._test_sync(
constants.NET_STATUS_ACTIVE,
constants.PORT_STATUS_ACTIVE,
constants.NET_STATUS_ACTIVE)
def test_initial_sync_with_resources_down(self):
ctx = context.get_admin_context()
with self._populate_data(ctx):
self._test_sync(
constants.NET_STATUS_DOWN, constants.PORT_STATUS_DOWN,
constants.NET_STATUS_DOWN, self._action_callback_status_down)
def test_resync_with_resources_down(self):
ctx = context.get_admin_context()
with self._populate_data(ctx):
sp = sync.SyncParameters(100)
self._plugin._synchronizer._synchronize_state(sp)
# Ensure the synchronizer performs a resync
sp.init_sync_performed = True
self._test_sync(
constants.NET_STATUS_DOWN, constants.PORT_STATUS_DOWN,
constants.NET_STATUS_DOWN, self._action_callback_status_down,
sp=sp)
def _action_callback_del_resource(self, ls_uuid, lp_uuid, lr_uuid):
del self.fc._fake_lswitch_dict[ls_uuid]
del self.fc._fake_lswitch_lport_dict[lp_uuid]
del self.fc._fake_lrouter_dict[lr_uuid]
def test_initial_sync_with_resources_removed(self):
ctx = context.get_admin_context()
with self._populate_data(ctx):
self._test_sync(
constants.NET_STATUS_ERROR, constants.PORT_STATUS_ERROR,
constants.NET_STATUS_ERROR, self._action_callback_del_resource)
def test_resync_with_resources_removed(self):
ctx = context.get_admin_context()
with self._populate_data(ctx):
sp = sync.SyncParameters(100)
self._plugin._synchronizer._synchronize_state(sp)
# Ensure the synchronizer performs a resync
sp.init_sync_performed = True
self._test_sync(
constants.NET_STATUS_ERROR, constants.PORT_STATUS_ERROR,
constants.NET_STATUS_ERROR, self._action_callback_del_resource,
sp=sp)
def _test_sync_with_chunk_larger_maxpagesize(
self, net_size, port_size, router_size, chunk_size, exp_calls):
ctx = context.get_admin_context()
real_func = nsxlib.get_single_query_page
sp = sync.SyncParameters(chunk_size)
with self._populate_data(ctx, net_size=net_size,
port_size=port_size,
router_size=router_size):
with mock.patch.object(sync, 'MAX_PAGE_SIZE', 15):
# The following mock is just for counting calls,
# but we will still run the actual function
with mock.patch.object(
nsxlib, 'get_single_query_page',
side_effect=real_func) as mock_get_page:
self._test_sync(
constants.NET_STATUS_ACTIVE,
constants.PORT_STATUS_ACTIVE,
constants.NET_STATUS_ACTIVE,
sp=sp)
# As each resource type does not exceed the maximum page size,
# the method should be called once for each resource type
self.assertEqual(exp_calls, mock_get_page.call_count)
def test_sync_chunk_larger_maxpagesize_no_multiple_requests(self):
# total resource size = 20
# total size for each resource does not exceed max page size (15)
self._test_sync_with_chunk_larger_maxpagesize(
net_size=5, port_size=2, router_size=5,
chunk_size=20, exp_calls=3)
def test_sync_chunk_larger_maxpagesize_triggers_multiple_requests(self):
# total resource size = 48
# total size for each resource does exceed max page size (15)
self._test_sync_with_chunk_larger_maxpagesize(
net_size=16, port_size=1, router_size=16,
chunk_size=48, exp_calls=6)
def test_sync_multi_chunk(self):
# The fake NSX API client cannot be used for this test
ctx = context.get_admin_context()
# Generate 4 networks, 1 port per network, and 4 routers
with self._populate_data(ctx, net_size=4, port_size=1, router_size=4):
fake_lswitches = jsonutils.loads(
self.fc.handle_get('/ws.v1/lswitch'))['results']
fake_lrouters = jsonutils.loads(
self.fc.handle_get('/ws.v1/lrouter'))['results']
fake_lswitchports = jsonutils.loads(
self.fc.handle_get('/ws.v1/lswitch/*/lport'))['results']
return_values = [
# Chunk 0 - lswitches
(fake_lswitches, None, 4),
# Chunk 0 - lrouters
(fake_lrouters[:2], 'xxx', 4),
# Chunk 0 - lports (size only)
([], 'start', 4),
# Chunk 1 - lrouters (2 more) (lswitches are skipped)
(fake_lrouters[2:], None, None),
# Chunk 1 - lports
(fake_lswitchports, None, 4)]
def fake_fetch_data(*args, **kwargs):
return return_values.pop(0)
# 2 Chunks, with 6 resources each.
# 1st chunk lswitches and lrouters
# 2nd chunk lrouters and lports
# Mock _fetch_data
with mock.patch.object(
self._plugin._synchronizer, '_fetch_data',
side_effect=fake_fetch_data):
sp = sync.SyncParameters(6)
def do_chunk(chunk_idx, ls_cursor, lr_cursor, lp_cursor):
self._plugin._synchronizer._synchronize_state(sp)
self.assertEqual(chunk_idx, sp.current_chunk)
self.assertEqual(ls_cursor, sp.ls_cursor)
self.assertEqual(lr_cursor, sp.lr_cursor)
self.assertEqual(lp_cursor, sp.lp_cursor)
# check 1st chunk
do_chunk(1, None, 'xxx', 'start')
# check 2nd chunk
do_chunk(0, None, None, None)
# Chunk size should have stayed the same
self.assertEqual(sp.chunk_size, 6)
def test_synchronize_network(self):
ctx = context.get_admin_context()
with self._populate_data(ctx):
# Put a network down to verify synchronization
ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
q_net_id = self._get_tag_dict(
self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
q_net_data = self._plugin._get_network(ctx, q_net_id)
self._plugin._synchronizer.synchronize_network(ctx, q_net_data)
# Reload from db
q_nets = self._plugin.get_networks(ctx)
for q_net in q_nets:
if q_net['id'] == q_net_id:
exp_status = constants.NET_STATUS_DOWN
else:
exp_status = constants.NET_STATUS_ACTIVE
self.assertEqual(exp_status, q_net['status'])
def test_synchronize_network_not_found_in_db_no_raise(self):
ctx = context.get_admin_context()
with self._populate_data(ctx):
# Put a network down to verify synchronization
ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
q_net_id = self._get_tag_dict(
self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
q_net_data = self._plugin._get_network(ctx, q_net_id)
with mock.patch.object(self._plugin,
'_get_network') as _get_network:
_get_network.side_effect = n_exc.NetworkNotFound(
net_id=q_net_data['id'])
self._plugin._synchronizer.synchronize_network(ctx, q_net_data)
def test_synchronize_network_on_get(self):
cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC')
ctx = context.get_admin_context()
with self._populate_data(ctx):
# Put a network down to verify punctual synchronization
ls_uuid = self.fc._fake_lswitch_dict.keys()[0]
q_net_id = self._get_tag_dict(
self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
q_net_data = self._plugin.get_network(ctx, q_net_id)
self.assertEqual(constants.NET_STATUS_DOWN, q_net_data['status'])
def test_synchronize_port_not_found_in_db_no_raise(self):
ctx = context.get_admin_context()
with self._populate_data(ctx):
# Put a port down to verify synchronization
lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0]
lport = self.fc._fake_lswitch_lport_dict[lp_uuid]
q_port_id = self._get_tag_dict(lport['tags'])['q_port_id']
lport['status'] = 'true'
q_port_data = self._plugin._get_port(ctx, q_port_id)
with mock.patch.object(self._plugin,
'_get_port') as _get_port:
_get_port.side_effect = n_exc.PortNotFound(
port_id=q_port_data['id'])
self._plugin._synchronizer.synchronize_port(ctx, q_port_data)
def test_synchronize_port(self):
ctx = context.get_admin_context()
with self._populate_data(ctx):
# Put a port down to verify synchronization
lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0]
lport = self.fc._fake_lswitch_lport_dict[lp_uuid]
q_port_id = self._get_tag_dict(lport['tags'])['q_port_id']
lport['status'] = 'true'
q_port_data = self._plugin._get_port(ctx, q_port_id)
self._plugin._synchronizer.synchronize_port(ctx, q_port_data)
# Reload from db
q_ports = self._plugin.get_ports(ctx)
for q_port in q_ports:
if q_port['id'] == q_port_id:
exp_status = constants.PORT_STATUS_ACTIVE
else:
exp_status = constants.PORT_STATUS_DOWN
self.assertEqual(exp_status, q_port['status'])
def test_synchronize_port_on_get(self):
cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC')
ctx = context.get_admin_context()
with self._populate_data(ctx):
# Put a port down to verify punctual synchronization
lp_uuid = self.fc._fake_lswitch_lport_dict.keys()[0]
lport = self.fc._fake_lswitch_lport_dict[lp_uuid]
q_port_id = self._get_tag_dict(lport['tags'])['q_port_id']
lport['status'] = 'false'
q_port_data = self._plugin.get_port(ctx, q_port_id)
self.assertEqual(constants.PORT_STATUS_DOWN,
q_port_data['status'])
    def test_synchronize_router_not_found_in_db_no_raise(self):
ctx = context.get_admin_context()
with self._populate_data(ctx):
# Put a router down to verify synchronization
lr_uuid = self.fc._fake_lrouter_dict.keys()[0]
q_rtr_id = self._get_tag_dict(
self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
q_rtr_data = self._plugin._get_router(ctx, q_rtr_id)
with mock.patch.object(self._plugin,
'_get_router') as _get_router:
_get_router.side_effect = l3.RouterNotFound(
router_id=q_rtr_data['id'])
self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data)
def test_synchronize_router(self):
ctx = context.get_admin_context()
with self._populate_data(ctx):
# Put a router down to verify synchronization
lr_uuid = self.fc._fake_lrouter_dict.keys()[0]
q_rtr_id = self._get_tag_dict(
self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
q_rtr_data = self._plugin._get_router(ctx, q_rtr_id)
self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data)
# Reload from db
q_routers = self._plugin.get_routers(ctx)
for q_rtr in q_routers:
if q_rtr['id'] == q_rtr_id:
exp_status = constants.NET_STATUS_DOWN
else:
exp_status = constants.NET_STATUS_ACTIVE
self.assertEqual(exp_status, q_rtr['status'])
def test_synchronize_router_nsx_mapping_not_found(self):
ctx = context.get_admin_context()
with self._populate_data(ctx):
# Put a router down to verify synchronization
lr_uuid = self.fc._fake_lrouter_dict.keys()[0]
q_rtr_id = self._get_tag_dict(
self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
q_rtr_data = self._plugin._get_router(ctx, q_rtr_id)
# delete router mapping from db.
db.delete_neutron_nsx_router_mapping(ctx.session, q_rtr_id)
# pop router from fake nsx client
router_data = self.fc._fake_lrouter_dict.pop(lr_uuid)
self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data)
# Reload from db
q_routers = self._plugin.get_routers(ctx)
for q_rtr in q_routers:
if q_rtr['id'] == q_rtr_id:
exp_status = constants.NET_STATUS_ERROR
else:
exp_status = constants.NET_STATUS_ACTIVE
self.assertEqual(exp_status, q_rtr['status'])
# put the router database since we don't handle missing
# router data in the fake nsx api_client
self.fc._fake_lrouter_dict[lr_uuid] = router_data
def test_synchronize_router_on_get(self):
cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC')
ctx = context.get_admin_context()
with self._populate_data(ctx):
# Put a router down to verify punctual synchronization
lr_uuid = self.fc._fake_lrouter_dict.keys()[0]
q_rtr_id = self._get_tag_dict(
self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
q_rtr_data = self._plugin.get_router(ctx, q_rtr_id)
self.assertEqual(constants.NET_STATUS_DOWN, q_rtr_data['status'])
def test_sync_nsx_failure_backoff(self):
self.mock_api.return_value.request.side_effect = api_exc.RequestTimeout
# chunk size won't matter here
sp = sync.SyncParameters(999)
for i in range(10):
self.assertEqual(
min(64, 2 ** i),
self._plugin._synchronizer._synchronize_state(sp))
| apache-2.0 | 5,512,301,682,407,519,000 | 45.051532 | 79 | 0.583275 | false |
mPowering/django-orb | orb/courses/export.py | 1 | 3124 | # -*- coding: utf-8 -*-
"""
Base course export functionality
"""
from __future__ import unicode_literals
import hashlib
import time
import markdown
from autoslugged.settings import slugify
from django.utils.functional import cached_property
from six import text_type
from typing import Dict # noqa
from typing import Text # noqa
def format_page_as_markdown(activity):
# type: (Dict) -> Text
"""Create an HTML fo
rmatted page from a simple course activity"""
header = "# {}\n\n".format(activity["intro"])
content = header + activity["content"]
return markdown.markdown(content)
def sequenced_string(sequence):
"""Returns the elements of sequence as a comma separated string
    >>> sequenced_string([1, 2, 3])
    '1,2,3'
"""
return ",".join([str(i) for i in sequence])
class CourseExport(object):
"""
Builds structure for exporting to an Oppia compatible format
It starts from a base zip file that contains the common folders
and files in an Oppia export.
For each resource (file): write it to resources/file-name.
The metadata goes in module.xml
    We track the order of sections and the order of activities in
    sections; everything has a digest.
"""
default_filename = ""
def __init__(self, name, id, sections=None, activities=None, version=1, **kwargs):
"""
Args:
name: name of the course
id: given ID in the system
            sections: list of section dicts (each with an 'id')
            activities: list of activity dicts (with 'id', 'type' and
                'section' keys)
**kwargs:
"""
self.sections = [] if sections is None else sections
self.activities = [] if activities is None else activities
for activity in self.activities:
activity["section"] += 1
activity["digest"] = hashlib.md5(
text_type(activity["id"]) + activity["type"]
).hexdigest()
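            # e.g. (illustrative) an activity with id 7 and type "page"
            # digests md5("7page"), giving each activity a stable identity.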
        # Make sure section IDs start at 1
for section in self.sections:
section["id"] += 1
section["activities"] = [
activity
for activity in self.activities
if activity["section"] == section["id"]
]
self.name = name
self.slug = slugify(name)
self.courseid = str(id)
self.version = version
self.backup_filename = kwargs.pop("backup_filename", self.default_filename)
self.validate_backup_filename()
def validate_backup_filename(self):
raise NotImplementedError("Export class must define validate_backup_filename")
def _by_resource_type(self, resource_type):
for course_resource in self.activities:
if course_resource["type"] == resource_type:
yield course_resource
def pages(self):
return self._by_resource_type("page")
def resources(self):
"""Returns only resource activities"""
return self._by_resource_type("resource")
@cached_property
def backup_date(self):
# type: () -> text_type
"""Returns the backup date/time in epoch seconds"""
return "{}".format(int(time.time()))
| gpl-3.0 | -4,556,169,896,410,257,000 | 27.144144 | 86 | 0.618118 | false |
b-randon/gImage | Python/settings_window.py | 1 | 15481 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\B\Desktop\git\gImage\UI\settings_window.ui'
#
# Created: Fri Dec 02 22:57:01 2016
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(412, 300)
Form.setMaximumSize(QtCore.QSize(500, 300))
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setObjectName("gridLayout")
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.tabWidget = QtGui.QTabWidget(Form)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtGui.QWidget()
self.tab.setObjectName("tab")
self.frame_3 = QtGui.QFrame(self.tab)
self.frame_3.setGeometry(QtCore.QRect(0, 10, 221, 201))
self.frame_3.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtGui.QFrame.Raised)
self.frame_3.setObjectName("frame_3")
self.gridLayout_2 = QtGui.QGridLayout(self.frame_3)
self.gridLayout_2.setObjectName("gridLayout_2")
self.bedlength_line = QtGui.QLineEdit(self.frame_3)
self.bedlength_line.setObjectName("bedlength_line")
self.gridLayout_2.addWidget(self.bedlength_line, 2, 1, 1, 1)
self.label_3 = QtGui.QLabel(self.frame_3)
self.label_3.setObjectName("label_3")
self.gridLayout_2.addWidget(self.label_3, 3, 0, 1, 1)
self.laseron_line = QtGui.QLineEdit(self.frame_3)
self.laseron_line.setObjectName("laseron_line")
self.gridLayout_2.addWidget(self.laseron_line, 4, 1, 1, 1)
self.label_2 = QtGui.QLabel(self.frame_3)
self.label_2.setObjectName("label_2")
self.gridLayout_2.addWidget(self.label_2, 2, 0, 1, 1)
self.label_6 = QtGui.QLabel(self.frame_3)
self.label_6.setObjectName("label_6")
self.gridLayout_2.addWidget(self.label_6, 3, 2, 1, 1)
self.label_7 = QtGui.QLabel(self.frame_3)
self.label_7.setObjectName("label_7")
self.gridLayout_2.addWidget(self.label_7, 4, 0, 1, 1)
self.idle_line = QtGui.QLineEdit(self.frame_3)
self.idle_line.setObjectName("idle_line")
self.gridLayout_2.addWidget(self.idle_line, 7, 1, 1, 1)
self.focus_line = QtGui.QLineEdit(self.frame_3)
self.focus_line.setObjectName("focus_line")
self.gridLayout_2.addWidget(self.focus_line, 3, 1, 1, 1)
self.laserpwr_line = QtGui.QLineEdit(self.frame_3)
self.laserpwr_line.setObjectName("laserpwr_line")
self.gridLayout_2.addWidget(self.laserpwr_line, 5, 1, 1, 1)
self.laseroff_line = QtGui.QLineEdit(self.frame_3)
self.laseroff_line.setObjectName("laseroff_line")
self.gridLayout_2.addWidget(self.laseroff_line, 6, 1, 1, 1)
self.label_8 = QtGui.QLabel(self.frame_3)
self.label_8.setObjectName("label_8")
self.gridLayout_2.addWidget(self.label_8, 6, 0, 1, 1)
self.label_4 = QtGui.QLabel(self.frame_3)
self.label_4.setObjectName("label_4")
self.gridLayout_2.addWidget(self.label_4, 1, 2, 1, 1)
self.label = QtGui.QLabel(self.frame_3)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 1, 0, 1, 1)
self.label_9 = QtGui.QLabel(self.frame_3)
self.label_9.setObjectName("label_9")
self.gridLayout_2.addWidget(self.label_9, 5, 0, 1, 1)
self.bedwidth_line = QtGui.QLineEdit(self.frame_3)
self.bedwidth_line.setObjectName("bedwidth_line")
self.gridLayout_2.addWidget(self.bedwidth_line, 1, 1, 1, 1)
self.label_10 = QtGui.QLabel(self.frame_3)
self.label_10.setObjectName("label_10")
self.gridLayout_2.addWidget(self.label_10, 7, 0, 1, 1)
self.label_11 = QtGui.QLabel(self.frame_3)
self.label_11.setObjectName("label_11")
self.gridLayout_2.addWidget(self.label_11, 7, 2, 1, 1)
self.label_5 = QtGui.QLabel(self.frame_3)
self.label_5.setObjectName("label_5")
self.gridLayout_2.addWidget(self.label_5, 2, 2, 1, 1)
self.verticalLayoutWidget = QtGui.QWidget(self.tab)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(240, 20, 101, 89))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout_4 = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout_4.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_33 = QtGui.QLabel(self.verticalLayoutWidget)
self.label_33.setObjectName("label_33")
self.verticalLayout_4.addWidget(self.label_33)
self.baud_rate_combo = QtGui.QComboBox(self.verticalLayoutWidget)
self.baud_rate_combo.setObjectName("baud_rate_combo")
self.verticalLayout_4.addWidget(self.baud_rate_combo)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem)
self.calibrate_button = QtGui.QPushButton(self.verticalLayoutWidget)
self.calibrate_button.setObjectName("calibrate_button")
self.verticalLayout_4.addWidget(self.calibrate_button)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtGui.QWidget()
self.tab_2.setObjectName("tab_2")
self.frame = QtGui.QFrame(self.tab_2)
self.frame.setGeometry(QtCore.QRect(220, 10, 161, 181))
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName("frame")
self.verticalLayout_2 = QtGui.QVBoxLayout(self.frame)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_16 = QtGui.QLabel(self.frame)
self.label_16.setObjectName("label_16")
self.verticalLayout_2.addWidget(self.label_16)
self.startgcode_edit = QtGui.QTextEdit(self.frame)
self.startgcode_edit.setObjectName("startgcode_edit")
self.verticalLayout_2.addWidget(self.startgcode_edit)
self.label_17 = QtGui.QLabel(self.frame)
self.label_17.setObjectName("label_17")
self.verticalLayout_2.addWidget(self.label_17)
self.endgcode_edit = QtGui.QTextEdit(self.frame)
self.endgcode_edit.setObjectName("endgcode_edit")
self.verticalLayout_2.addWidget(self.endgcode_edit)
self.frame_2 = QtGui.QFrame(self.tab_2)
self.frame_2.setGeometry(QtCore.QRect(10, 10, 191, 181))
self.frame_2.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtGui.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.gridLayout_4 = QtGui.QGridLayout(self.frame_2)
self.gridLayout_4.setObjectName("gridLayout_4")
self.minpwr_line = QtGui.QLineEdit(self.frame_2)
self.minpwr_line.setObjectName("minpwr_line")
self.gridLayout_4.addWidget(self.minpwr_line, 1, 1, 1, 1)
self.label_12 = QtGui.QLabel(self.frame_2)
self.label_12.setObjectName("label_12")
self.gridLayout_4.addWidget(self.label_12, 1, 0, 1, 1)
self.label_14 = QtGui.QLabel(self.frame_2)
self.label_14.setObjectName("label_14")
self.gridLayout_4.addWidget(self.label_14, 3, 0, 1, 1)
self.feedrate_line = QtGui.QLineEdit(self.frame_2)
self.feedrate_line.setObjectName("feedrate_line")
self.gridLayout_4.addWidget(self.feedrate_line, 3, 1, 1, 1)
self.label_13 = QtGui.QLabel(self.frame_2)
self.label_13.setObjectName("label_13")
self.gridLayout_4.addWidget(self.label_13, 2, 0, 1, 1)
self.maxpwr_line = QtGui.QLineEdit(self.frame_2)
self.maxpwr_line.setObjectName("maxpwr_line")
self.gridLayout_4.addWidget(self.maxpwr_line, 2, 1, 1, 1)
self.label_15 = QtGui.QLabel(self.frame_2)
self.label_15.setObjectName("label_15")
self.gridLayout_4.addWidget(self.label_15, 3, 2, 1, 1)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.addmaterial_button = QtGui.QPushButton(self.frame_2)
self.addmaterial_button.setObjectName("addmaterial_button")
self.horizontalLayout.addWidget(self.addmaterial_button)
self.delmaterial_button = QtGui.QPushButton(self.frame_2)
self.delmaterial_button.setObjectName("delmaterial_button")
self.horizontalLayout.addWidget(self.delmaterial_button)
self.gridLayout_4.addLayout(self.horizontalLayout, 0, 2, 1, 1)
self.material_combobox = QtGui.QComboBox(self.frame_2)
self.material_combobox.setObjectName("material_combobox")
self.gridLayout_4.addWidget(self.material_combobox, 0, 0, 1, 2)
self.tabWidget.addTab(self.tab_2, "")
self.tab_3 = QtGui.QWidget()
self.tab_3.setObjectName("tab_3")
self.gridLayoutWidget = QtGui.QWidget(self.tab_3)
self.gridLayoutWidget.setGeometry(QtCore.QRect(10, 10, 111, 109))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.gridLayout_3 = QtGui.QGridLayout(self.gridLayoutWidget)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.yautosize_line = QtGui.QLineEdit(self.gridLayoutWidget)
self.yautosize_line.setObjectName("yautosize_line")
self.gridLayout_3.addWidget(self.yautosize_line, 5, 1, 1, 1)
self.xautosize_line = QtGui.QLineEdit(self.gridLayoutWidget)
self.xautosize_line.setObjectName("xautosize_line")
self.gridLayout_3.addWidget(self.xautosize_line, 2, 1, 1, 1)
self.label_20 = QtGui.QLabel(self.gridLayoutWidget)
self.label_20.setObjectName("label_20")
self.gridLayout_3.addWidget(self.label_20, 2, 0, 1, 1)
self.label_21 = QtGui.QLabel(self.gridLayoutWidget)
self.label_21.setObjectName("label_21")
self.gridLayout_3.addWidget(self.label_21, 5, 0, 1, 1)
self.label_19 = QtGui.QLabel(self.gridLayoutWidget)
self.label_19.setObjectName("label_19")
self.gridLayout_3.addWidget(self.label_19, 5, 2, 1, 1)
self.label_18 = QtGui.QLabel(self.gridLayoutWidget)
self.label_18.setObjectName("label_18")
self.gridLayout_3.addWidget(self.label_18, 2, 2, 1, 1)
self.autoset_radio = QtGui.QRadioButton(self.gridLayoutWidget)
self.autoset_radio.setObjectName("autoset_radio")
self.gridLayout_3.addWidget(self.autoset_radio, 0, 0, 1, 3)
self.tabWidget.addTab(self.tab_3, "")
self.verticalLayout.addWidget(self.tabWidget)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.accept_button = QtGui.QPushButton(Form)
self.accept_button.setObjectName("accept_button")
self.horizontalLayout_2.addWidget(self.accept_button)
self.close_button = QtGui.QPushButton(Form)
self.close_button.setObjectName("close_button")
self.horizontalLayout_2.addWidget(self.close_button)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.gridLayout.addLayout(self.verticalLayout, 0, 0, 1, 1)
self.retranslateUi(Form)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtGui.QApplication.translate("Form", "Form", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("Form", "Focus Distance", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("Form", "Bed Length", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("Form", "mm", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("Form", "Laser On Cmd.", None, QtGui.QApplication.UnicodeUTF8))
self.label_8.setText(QtGui.QApplication.translate("Form", "Laser Off Cmd.", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("Form", "mm", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("Form", "Bed Width", None, QtGui.QApplication.UnicodeUTF8))
self.label_9.setText(QtGui.QApplication.translate("Form", "Laser Pwr. Cmd.", None, QtGui.QApplication.UnicodeUTF8))
self.label_10.setText(QtGui.QApplication.translate("Form", "On Idle Delay", None, QtGui.QApplication.UnicodeUTF8))
self.label_11.setText(QtGui.QApplication.translate("Form", "s", None, QtGui.QApplication.UnicodeUTF8))
self.label_5.setText(QtGui.QApplication.translate("Form", "mm", None, QtGui.QApplication.UnicodeUTF8))
self.label_33.setText(QtGui.QApplication.translate("Form", "Baud Rate", None, QtGui.QApplication.UnicodeUTF8))
self.calibrate_button.setText(QtGui.QApplication.translate("Form", "Calibrate", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), QtGui.QApplication.translate("Form", "Machine", None, QtGui.QApplication.UnicodeUTF8))
self.label_16.setText(QtGui.QApplication.translate("Form", "Starting Gcode", None, QtGui.QApplication.UnicodeUTF8))
self.label_17.setText(QtGui.QApplication.translate("Form", "Ending Gcode", None, QtGui.QApplication.UnicodeUTF8))
self.label_12.setText(QtGui.QApplication.translate("Form", "Min Power", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setText(QtGui.QApplication.translate("Form", "Feedrate", None, QtGui.QApplication.UnicodeUTF8))
self.label_13.setText(QtGui.QApplication.translate("Form", "Max Power", None, QtGui.QApplication.UnicodeUTF8))
self.label_15.setText(QtGui.QApplication.translate("Form", "mm/s", None, QtGui.QApplication.UnicodeUTF8))
self.addmaterial_button.setText(QtGui.QApplication.translate("Form", "+", None, QtGui.QApplication.UnicodeUTF8))
self.delmaterial_button.setText(QtGui.QApplication.translate("Form", "-", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), QtGui.QApplication.translate("Form", "Gcode", None, QtGui.QApplication.UnicodeUTF8))
self.label_20.setText(QtGui.QApplication.translate("Form", "X", None, QtGui.QApplication.UnicodeUTF8))
self.label_21.setText(QtGui.QApplication.translate("Form", "y", None, QtGui.QApplication.UnicodeUTF8))
self.label_19.setText(QtGui.QApplication.translate("Form", "Pixels", None, QtGui.QApplication.UnicodeUTF8))
self.label_18.setText(QtGui.QApplication.translate("Form", "Pixels", None, QtGui.QApplication.UnicodeUTF8))
self.autoset_radio.setText(QtGui.QApplication.translate("Form", "Auto Set Size", None, QtGui.QApplication.UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), QtGui.QApplication.translate("Form", "Application", None, QtGui.QApplication.UnicodeUTF8))
self.accept_button.setText(QtGui.QApplication.translate("Form", "Accept", None, QtGui.QApplication.UnicodeUTF8))
self.close_button.setText(QtGui.QApplication.translate("Form", "Close", None, QtGui.QApplication.UnicodeUTF8))
| gpl-3.0 | -8,510,505,441,863,688,000 | 62.187755 | 160 | 0.692526 | false |
Kjili/analysis-preservation.cern.ch | cap/modules/cache/__init__.py | 9 | 1269 | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
#
"""Cache module."""
from __future__ import absolute_import, print_function
from .ext import CAPCache
from .proxies import current_cache
__all__ = ('CAPCache', 'current_cache')
| gpl-2.0 | -4,389,507,737,048,496,000 | 37.454545 | 78 | 0.760441 | false |
palisadoes/switchmap-ng | switchmap/test/_do_all_tests.py | 1 | 3884 | #!/usr/bin/env python3
"""Script to test all the switchmap-ng unittests.
1) This script runs each unittest script in switchmap-ng's
switchmap.test module.
2) The only scripts run in the module are those whose names
start with 'test_'
3) The unittests will only run on a test database whose name
starts with 'test_'
4) All unittest scripts must be able to successfully run independently
of all others.
"""
import locale
import os
import sys
import subprocess
# Try to create a working PYTHONPATH
TEST_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
SWITCHMAP_DIRECTORY = os.path.abspath(os.path.join(TEST_DIRECTORY, os.pardir))
ROOT_DIRECTORY = os.path.abspath(os.path.join(SWITCHMAP_DIRECTORY, os.pardir))
if TEST_DIRECTORY.endswith('/switchmap-ng/switchmap/test') is True:
sys.path.append(ROOT_DIRECTORY)
else:
print(
'This script is not installed in the "switchmap-ng/bin" directory. '
'Please fix.')
sys.exit(2)
# switchmap-ng libraries
try:
from switchmap.utils import general
except ImportError:
print('You need to set your PYTHONPATH to include the switchmap library')
sys.exit(2)
from switchmap.test import unittest_setup
def main():
"""Test all the switchmap-ng modules with unittests.
Args:
None
Returns:
None
"""
# Determine unittest directory
root_dir = general.root_directory()
test_dir = '{}/switchmap/test'.format(root_dir)
# Get list of test files
test_files = os.listdir(test_dir)
for filename in sorted(test_files):
full_path = '{}/{}'.format(test_dir, filename)
# Run the test
if filename.startswith('test_'):
run_script(full_path)
# Print
message = ('\nHooray - All Done OK!\n')
print(message)
def run_script(cli_string):
"""Run the cli_string UNIX CLI command and record output.
    Args:
        cli_string: The CLI command to run
Returns:
None
"""
# Initialize key variables
encoding = locale.getdefaultlocale()[1]
slurpy_returncode = ('----- switchmap-ng Return Code '
'----------------------------------------')
slurpy_stdoutdata = ('----- switchmap-ng Test Output '
'----------------------------------------')
slurpy_stderrdata = ('----- switchmap-ng Test Error '
'-----------------------------------------')
# Say what we are doing
string2print = '\nRunning Command: {}'.format(cli_string)
print(string2print)
# Run update_devices script
do_command_list = list(cli_string.split(' '))
# Create the subprocess object
process = subprocess.Popen(
do_command_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdoutdata, stderrdata = process.communicate()
returncode = process.returncode
# Crash if the return code is not 0
if returncode != 0:
# Print the Return Code header
string2print = '\n{}'.format(slurpy_returncode)
print(string2print)
# Print the Return Code
string2print = '\n{}'.format(returncode)
print(string2print)
# Print the STDOUT header
string2print = '\n{}\n'.format(slurpy_stdoutdata)
print(string2print)
# Print the STDOUT
for line in stdoutdata.decode(encoding).split('\n'):
string2print = '{}'.format(line)
print(string2print)
# Print the STDERR header
string2print = '\n{}\n'.format(slurpy_stderrdata)
print(string2print)
# Print the STDERR
for line in stderrdata.decode(encoding).split('\n'):
string2print = '{}'.format(line)
print(string2print)
# All done
sys.exit(2)
if __name__ == '__main__':
# Test the configuration variables
unittest_setup.ready()
# Do the unit test
main()
| apache-2.0 | -4,210,289,437,080,702,000 | 26.160839 | 78 | 0.607106 | false |
jmmartinez84/yowsup | yowsup/demos/cli/stack.py | 28 | 1038 | from yowsup.stacks import YowStackBuilder
from .layer import YowsupCliLayer
from yowsup.layers.auth import AuthError
from yowsup.layers import YowLayerEvent
from yowsup.layers.auth import YowAuthenticationProtocolLayer
import sys
class YowsupCliStack(object):
def __init__(self, credentials, encryptionEnabled = True):
stackBuilder = YowStackBuilder()
self.stack = stackBuilder\
.pushDefaultLayers(encryptionEnabled)\
.push(YowsupCliLayer)\
.build()
        self.stack.setCredentials(credentials)
def start(self):
print("Yowsup Cli client\n==================\nType /help for available commands\n")
self.stack.broadcastEvent(YowLayerEvent(YowsupCliLayer.EVENT_START))
try:
self.stack.loop(timeout = 0.5, discrete = 0.5)
except AuthError as e:
print("Auth Error, reason %s" % e)
except KeyboardInterrupt:
print("\nYowsdown")
sys.exit(0)
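
# Example usage (a sketch; the phone number and base64-encoded password below
# are hypothetical placeholders, not real credentials):
#
#     stack = YowsupCliStack(("15551234567", "passwordBase64=="))
#     stack.start()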
| gpl-3.0 | -3,670,054,124,775,979,500 | 33.6 | 91 | 0.659923 | false |
40223108/w18 | body.py | 15 | 11491 |
import cherrypy
# This is the definition of the MAN class
'''
# Import the submodule in the application
import programs.cdag30.man as cdag30_man
# Load man.py under the cdag30 module and map the submodule man to its MAN() class
root.cdag30.man = cdag30_man.MAN()
# Once this is set up, you can use
/cdag30/man/assembly
# to call the assembly method of the MAN class in man.py
'''
class MAN(object):
    # Each group uses index to direct the subsequent program flow
@cherrypy.expose
def index(self, *args, **kwargs):
outstring = '''
        This is the MAN class under the cdag7 module of the 2015CDA collaborative project.<br /><br />
        <!-- A relative link is used here rather than an absolute URL (this line is an HTML comment) -->
        <a href="body">Run body</a><br /><br />
        <a href="assembly">Run hand</a><br /><br />
        Please make sure the parts listed below are in the V:/home/lego/man directory, and open an empty Creo assembly file.<br />
        <a href="/static/lego_man.7z">lego_man.7z</a> (right-click and save as a .7z file)<br />
'''
return outstring
@cherrypy.expose
def assembly(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/wl_header.js"></script>
</head>
<body>
<script language="JavaScript">
/* man2.py performs the whole assembly through function calls */
/* Define a part-assembly function */
// featID is the feature id of the first component assembled into the assembly
// inc is the assembly-order index relative to part1; the first part placed in the assembly file is featID+0
// part2 is the file name of the part being added
////////////////////////////////////////////////
// axis_plane_assembly: assembly function
////////////////////////////////////////////////
function axis_plane_assembly(session, assembly, transf, featID, inc, part2, axis1, plane1, axis2, plane2){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
var asmDatums = new Array(axis1, plane1);
var compDatums = new Array(axis2, plane2);
var relation = new Array (pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS, pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (true, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
}
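// Example call for axis_plane_assembly (illustrative only -- the part and
// datum names below are hypothetical, not taken from this project):
// axis_plane_assembly(session, assembly, transf, featID, 0,
//                     "LEGO_ARM_RT.prt", "A_1", "DTM1", "A_1", "DTM1");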
// End of the axis_plane_assembly() function
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// three_plane_assembly assembles with ALIGN constraints; a featID of 0 means an empty assembly file
///////////////////////////////////////////////////////////////////////////////////////////////////////////
function three_plane_assembly(session, assembly, transf, featID, inc, part2, plane1, plane2, plane3, plane4, plane5, plane6){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
    // A featID of 0 means an empty assembly file
if (featID != 0){
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
}else{
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = assembly;
        // Obtain the first assembled component's id, first_featID
        // Get the component ids under the assembly; since there is only one part, take its featID with index 0
        var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
        // This featID is the id of the first part in the assembly, i.e. the LEGO figure's body
var first_featID = components.Item(0).Id;
}
var constrs = pfcCreate("pfcComponentConstraints");
var asmDatums = new Array(plane1, plane2, plane3);
var compDatums = new Array(plane4, plane5, plane6);
var MpfcSelect = pfcCreate("MpfcSelect");
for (var i = 0; i < 3; i++)
{
var asmItem = subassembly.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (false, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
    // If featID == 0, return first_featID
if (featID == 0)
return first_featID;
}
// End of the three_plane_assembly() function
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// three_plane_assembly2 assembles with MATE constraints; a featID of 0 means an empty assembly file
///////////////////////////////////////////////////////////////////////////////////////////////////////////
function three_plane_assembly2(session, assembly, transf, featID, inc, part2, plane1, plane2, plane3, plane4, plane5, plane6){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
    // A featID of 0 means an empty assembly file
if (featID != 0){
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
}else{
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = assembly;
            // Obtain the first assembled component's id, first_featID
            // Get the component ids under the assembly; since there is only one part, take its featID with index 0
            var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
            // This featID is the id of the first part in the assembly, i.e. the LEGO figure's body
var first_featID = components.Item(0).Id;
}
var constrs = pfcCreate("pfcComponentConstraints");
var asmDatums = new Array(plane1, plane2, plane3);
var compDatums = new Array(plane4, plane5, plane6);
var MpfcSelect = pfcCreate("MpfcSelect");
for (var i = 0; i < 3; i++)
{
var asmItem = subassembly.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (false, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
    // If featID == 0, return first_featID
if (featID == 0)
return first_featID;
}
// End of the three_plane_assembly2() function, which mainly uses three-plane MATE assembly
//
// If the operating system hosting Creo is not a Windows environment,
if (!pfcIsWindows())
// enable the corresponding UniversalXPConnect privilege (the equivalent of ActiveX on Windows)
    netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
// pfcGetProESession() is a function in pfcUtils.js; it confirms this JavaScript runs in the embedded browser
var session = pfcGetProESession();
// Set the config option so the built-in placement assumptions of the component assembly flow are not used
session.SetConfigOption("comp_placement_assumptions","no");
// Create the placement matrix for the parts; Pro/Web.Link variables cannot be created directly and must be built with pfcCreate()
var identityMatrix = pfcCreate("pfcMatrix3D");
// Build the identity placement matrix
for (var x = 0; x < 4; x++)
for (var y = 0; y < 4; y++)
{
if (x == y)
identityMatrix.Set(x, y, 1.0);
else
identityMatrix.Set(x, y, 0.0);
}
// Use identityMatrix to build the transf coordinate-transformation matrix
var transf = pfcCreate("pfcTransform3D").Create(identityMatrix);
// Get the current working directory
var currentDir = session.getCurrentDirectory();
// Use the currently opened empty assembly file as the model
var model = session.CurrentModel;
// Check that a model exists and that it is an assembly; otherwise throw an error
if (model == void null || model.Type != pfcCreate("pfcModelType").MDL_ASSEMBLY)
    throw new Error (0, "Current model is not an assembly.");
// Treat this model as the assembly object
var assembly = model;
/////////////////////////////////////////////////////////////////
// Start the assembly; everything is assembled through function calls
/////////////////////////////////////////////////////////////////
// The Body is assembled to the empty assembly file with three plane constraints
// The assembly planes are ASM_TOP, ASM_FRONT, ASM_RIGHT
// The Body planes are TOP, FRONT, RIGHT
// A featID of 0 means an empty assembly file, and the function returns the featID of the first component
var featID = three_plane_assembly(session, assembly, transf, 0, 0, "LEGO_BODY.prt", "ASM_TOP", "ASM_FRONT", "ASM_RIGHT", "TOP", "FRONT", "RIGHT");
// Regenerate and repaint the assembly file
assembly.Regenerate (void null);
session.GetModelWindow (assembly).Repaint();
</script>
</body>
</html>
'''
return outstring | gpl-3.0 | 9,147,876,049,154,187,000 | 38.57529 | 149 | 0.66104 | false |
algorhythms/LeetCode | 385 Mini Parser Nested Integer.py | 1 | 3279 | """
Given a nested list of integers represented as a string, implement a parser to deserialize it.
Each element is either an integer, or a list -- whose elements may also be integers or other lists.
Note: You may assume that the string is well-formed:
String is non-empty.
String does not contain white spaces.
String contains only digits 0-9, [, - ,, ].
Example 1:
Given s = "324",
You should return a NestedInteger object which contains a single integer 324.
Example 2:
Given s = "[123,[456,[789]]]",
Return a NestedInteger object containing a nested list with 2 elements:
1. An integer containing value 123.
2. A nested list containing two elements:
i. An integer containing value 456.
ii. A nested list with one element:
a. An integer containing value 789.
"""
__author__ = 'Daniel'
# """
# This is the interface that allows for creating nested lists.
# You should not implement it, or speculate about its implementation
# """
class NestedInteger(object):
def __init__(self, value=None):
"""
If value is not specified, initializes an empty list.
Otherwise initializes a single integer equal to value.
"""
def isInteger(self):
"""
@return True if this NestedInteger holds a single integer, rather than a nested list.
:rtype bool
"""
def add(self, elem):
"""
Set this NestedInteger to hold a nested list and adds a nested integer elem to it.
:rtype void
"""
def setInteger(self, value):
"""
Set this NestedInteger to hold a single integer equal to value.
:rtype void
"""
def getInteger(self):
"""
@return the single integer that this NestedInteger holds, if it holds a single integer
Return None if this NestedInteger holds a nested list
:rtype int
"""
def getList(self):
"""
@return the nested list that this NestedInteger holds, if it holds a nested list
Return None if this NestedInteger holds a single integer
:rtype List[NestedInteger]
"""
class Solution(object):
def deserialize(self, s):
"""
NestedInteger is a UnionType in functional programming jargon.
[1, [1, [2]], 3, 4]
From a general example, develop an algorithm using stack
The algorithm itself is easy, but the string parsing contains lots of edge cases
:type s: str
:rtype: NestedInteger
"""
if not s: return None
stk = []
i = 0
while i < len(s):
if s[i] == '[':
stk.append(NestedInteger())
i += 1
elif s[i] == ']':
ni = stk.pop()
if not stk: return ni
stk[-1].add(ni)
i += 1
elif s[i] == ',':
i += 1
else:
j = i
while j < len(s) and (s[j].isdigit() or s[j] == '-'): j += 1
ni = NestedInteger(int(s[i: j]) if s[i: j] else None)
if not stk: return ni
stk[-1].add(ni)
i = j
return stk.pop()
if __name__ == "__main__":
Solution().deserialize("[123,[456,[789]]]")
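    # Extra sanity checks (illustrative; the NestedInteger above is only an
    # interface stub with no storage, so these exercise the parser without
    # asserting on the resulting structure):
    Solution().deserialize("324")
    Solution().deserialize("[-1,[0],2]")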
| mit | 815,430,519,940,870,300 | 25.658537 | 99 | 0.571211 | false |
guangxingli/python-neo | neo/test/iotest/test_exampleio.py | 13 | 1973 | # -*- coding: utf-8 -*-
"""
Tests of neo.io.exampleio
"""
# needed for python 3 compatibility
from __future__ import absolute_import, division
try:
import unittest2 as unittest
except ImportError:
import unittest
from neo.io.exampleio import ExampleIO, HAVE_SCIPY
from neo.test.iotest.common_io_test import BaseTestIO
class TestExampleIO(BaseTestIO, unittest.TestCase):
ioclass = ExampleIO
files_to_test = ['fake1',
'fake2',
]
files_to_download = []
class TestExample2IO(unittest.TestCase):
@unittest.skipUnless(HAVE_SCIPY, "requires scipy")
def test_read_segment_lazy(self):
r = ExampleIO(filename=None)
seg = r.read_segment(cascade=True, lazy=True)
for ana in seg.analogsignals:
self.assertEqual(ana.size, 0)
assert hasattr(ana, 'lazy_shape')
for st in seg.spiketrains:
self.assertEqual(st.size, 0)
assert hasattr(st, 'lazy_shape')
seg = r.read_segment(cascade=True, lazy=False)
for ana in seg.analogsignals:
self.assertNotEqual(ana.size, 0)
for st in seg.spiketrains:
self.assertNotEqual(st.size, 0)
@unittest.skipUnless(HAVE_SCIPY, "requires scipy")
def test_read_segment_cascade(self):
r = ExampleIO(filename=None)
seg = r.read_segment(cascade=False)
self.assertEqual(len(seg.analogsignals), 0)
seg = r.read_segment(cascade=True, num_analogsignal=4)
self.assertEqual(len(seg.analogsignals), 4)
@unittest.skipUnless(HAVE_SCIPY, "requires scipy")
def test_read_analogsignal(self):
r = ExampleIO(filename=None)
r.read_analogsignal(lazy=False, segment_duration=15., t_start=-1)
@unittest.skipUnless(HAVE_SCIPY, "requires scipy")
def read_spiketrain(self):
r = ExampleIO(filename=None)
r.read_spiketrain(lazy=False,)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 455,342,504,990,647,400 | 29.828125 | 73 | 0.641156 | false |
diversys/wubi | src/openpgp/sap/pkt/ModificationDetectionCode.py | 9 | 1180 | """Modification detection code RFC 2440.5.14
The modification detection code contains a SHA-1 hash of the plaintext
in a decrypted symmetrically encrypted integrity protected packet.
"""
from Packet import Packet
class ModificationDetectionCode(Packet):
__doc__ = """Modification Detection Code Packet
""" + Packet._ivars
def __init__(self, *args, **kwords):
try:
self.fill(args[0])
except IndexError:
pass
def __str__(self):
return "<ModificationDetectionCode instance>"
def fill_body(self, d):
self.body = ModificationDetectionCodeBody(d)
class ModificationDetectionCodeBody:
"""Modification Detection Code
:IVariables:
- `hash`: string of 20 SHA-1 hashed octets
- `_d`: string used to build packet body (same as `hash`)
"""
def __init__(self, *args, **kwords):
try:
self.fill(args[0])
except IndexError:
pass
def fill(self, d):
if len(d) == 20:
self._d = self.hash = d
else:
            # PGPPacketError is neither defined nor imported in this module, so
            # the original `raise PGPPacketError, ...` would itself fail with a
            # NameError; raise ValueError instead.
            raise ValueError(
                "MDCode packet body must be 20 characters long, not->(%s)." % len(d))
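
# Illustrative usage (a sketch, not part of the original module): build an MDC
# body from the SHA-1 digest of the decrypted plaintext octets.
#
#     import hashlib
#     digest = hashlib.sha1(plaintext_octets).digest()  # 20 octets
#     mdc_body = ModificationDetectionCodeBody(digest)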
| gpl-2.0 | 7,156,879,438,752,994,000 | 25.818182 | 102 | 0.60339 | false |
akretion/l10n-brazil | l10n_br_hr_contract/models/hr_contract.py | 1 | 3113 | # -*- coding: utf-8 -*-
# Copyright (C) 2016 Daniel Sadamo - KMEE Informática
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import models, fields, api
from odoo.addons.l10n_br_base.tools import fiscal
from odoo.exceptions import ValidationError
MONTHS = [
('1', 'January'),
('2', 'February'),
('3', 'March'),
('4', 'April'),
('5', 'May'),
('6', 'June'),
('7', 'July'),
('8', 'August'),
('9', 'September'),
('10', 'October'),
('11', 'November'),
('12', 'December'),
]
class HrContract(models.Model):
_inherit = 'hr.contract'
admission_type_id = fields.Many2one(
string='Admission type',
comodel_name='hr.contract.admission.type')
labor_bond_type_id = fields.Many2one(
string='Labor bond type',
comodel_name='hr.contract.labor.bond.type')
labor_regime_id = fields.Many2one(
string='Labor regime',
comodel_name='hr.contract.labor.regime',
help='e-Social: S2300 - tpRegPrev',
)
salary_unit = fields.Many2one(
string='Salary Unity',
comodel_name='hr.contract.salary.unit',
help='e-Social: S2300 - tpRegPrev',
)
weekly_hours = fields.Float(string='Weekly hours')
monthly_hours = fields.Float(string='Monthly hours')
partner_union = fields.Many2one(
        string='Union',
        comodel_name='res.partner',
        domain=[('union_entity_code', '!=', False)],
        help='A union is a partner that has a union entity code '
             '(union_entity_code) defined.',
)
union = fields.Char(string='Union')
union_cnpj = fields.Char(string='Union CNPJ')
union_entity_code = fields.Char(
string='Union entity code',
related='partner_union.union_entity_code',
)
month_base_date = fields.Selection(string='Base date month',
selection=MONTHS)
discount_union_contribution = fields.Boolean(
string='Discount union contribution in admission')
resignation_date = fields.Date(string='Resignation date')
resignation_cause_id = fields.Many2one(
comodel_name='hr.contract.resignation.cause',
string='Resignation cause')
notice_of_termination_id = fields.Many2one(
string='Notice of termination type',
comodel_name='hr.contract.notice.termination'
)
notice_of_termination_date = fields.Date(
string='Notice of termination date')
notice_of_termination_payment_date = fields.Date(
string='Notice of termination payment date'
)
by_death = fields.Char(string='By death',
help='Death certificate/Process/Beneficiary')
resignation_code = fields.Char(related='resignation_cause_id.code',
invisible=True)
@api.multi
@api.constrains('union_cnpj')
def _validate_union_cnpj(self):
for record in self:
if record.union_cnpj:
if not fiscal.validate_cnpj(record.union_cnpj):
raise ValidationError("Invalid union CNPJ!")
| agpl-3.0 | 2,626,488,130,563,757,600 | 32.44086 | 72 | 0.609968 | false |
bigfatpaulyj/py-airfoil | scons-local-2.2.0/SCons/Tool/SCCS.py | 14 | 2415 | """SCons.Tool.SCCS.py
Tool-specific initialization for SCCS.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/SCCS.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
"""Add a Builder factory function and construction variables for
SCCS to an Environment."""
def SCCSFactory(env=env):
""" """
import SCons.Warnings as W
W.warn(W.DeprecatedSourceCodeWarning, """The SCCS() factory is deprecated and there is no replacement.""")
act = SCons.Action.Action('$SCCSCOM', '$SCCSCOMSTR')
return SCons.Builder.Builder(action = act, env = env)
#setattr(env, 'SCCS', SCCSFactory)
env.SCCS = SCCSFactory
env['SCCS'] = 'sccs'
env['SCCSFLAGS'] = SCons.Util.CLVar('')
env['SCCSGETFLAGS'] = SCons.Util.CLVar('')
env['SCCSCOM'] = '$SCCS $SCCSFLAGS get $SCCSGETFLAGS $TARGET'
def exists(env):
return env.Detect('sccs')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 | -4,677,073,048,198,365,000 | 36.734375 | 114 | 0.72381 | false |
bavardage/statsmodels | statsmodels/tsa/tests/test_adfuller_lag.py | 3 | 1836 | # -*- coding: utf-8 -*-
"""Test for autolag of adfuller, unitroot_adf
Created on Wed May 30 21:39:46 2012
Author: Josef Perktold
"""
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
import statsmodels.tsa.stattools as tsast
from statsmodels.datasets import macrodata
def test_adf_autolag():
#see issue #246
#this is mostly a unit test
d2 = macrodata.load().data
for k_trend, tr in enumerate(['nc', 'c', 'ct', 'ctt']):
#[None:'nc', 0:'c', 1:'ct', 2:'ctt']
x = np.log(d2['realgdp'])
xd = np.diff(x)
#check exog
adf3 = tsast.adfuller(x, maxlag=None, autolag='aic',
regression=tr, store=True, regresults=True)
st2 = adf3[-1]
assert_equal(len(st2.autolag_results), 15 + 1) #+1 for lagged level
for l, res in sorted(st2.autolag_results.iteritems())[:5]:
lag = l-k_trend
#assert correct design matrices in _autolag
assert_equal(res.model.exog[-10:,k_trend], x[-11:-1])
assert_equal(res.model.exog[-1,k_trend+1:], xd[-lag:-1][::-1])
#min-ic lag of dfgls in Stata is also 2, or 9 for maic with notrend
assert_equal(st2.usedlag, 2)
#same result with lag fixed at usedlag of autolag
adf2 = tsast.adfuller(x, maxlag=2, autolag=None, regression=tr)
assert_almost_equal(adf3[:2], adf2[:2], decimal=12)
tr = 'c'
#check maxlag with autolag
adf3 = tsast.adfuller(x, maxlag=5, autolag='aic',
regression=tr, store=True, regresults=True)
assert_equal(len(adf3[-1].autolag_results), 5 + 1)
adf3 = tsast.adfuller(x, maxlag=0, autolag='aic',
regression=tr, store=True, regresults=True)
assert_equal(len(adf3[-1].autolag_results), 0 + 1)
| bsd-3-clause | -5,684,089,984,794,177,000 | 36.469388 | 79 | 0.598039 | false |
blueboxgroup/nova | nova/db/sqlalchemy/migrate_repo/versions/247_nullable_mismatch.py | 32 | 1560 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import MetaData, Table
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
quota_usages = Table('quota_usages', meta, autoload=True)
quota_usages.c.resource.alter(nullable=False)
pci_devices = Table('pci_devices', meta, autoload=True)
pci_devices.c.deleted.alter(nullable=True)
pci_devices.c.product_id.alter(nullable=False)
pci_devices.c.vendor_id.alter(nullable=False)
pci_devices.c.dev_type.alter(nullable=False)
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
quota_usages = Table('quota_usages', meta, autoload=True)
quota_usages.c.resource.alter(nullable=True)
pci_devices = Table('pci_devices', meta, autoload=True)
pci_devices.c.deleted.alter(nullable=False)
pci_devices.c.product_id.alter(nullable=True)
pci_devices.c.vendor_id.alter(nullable=True)
pci_devices.c.dev_type.alter(nullable=True)
| apache-2.0 | 7,505,489,582,137,865,000 | 35.27907 | 78 | 0.728846 | false |
eepalms/gem5-newcache | src/arch/arm/ArmTLB.py | 10 | 2852 | # -*- mode:python -*-
# Copyright (c) 2009 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
from MemObject import MemObject
class ArmTableWalker(MemObject):
type = 'ArmTableWalker'
cxx_class = 'ArmISA::TableWalker'
cxx_header = "arch/arm/table_walker.hh"
port = MasterPort("Port for TableWalker to do walk the translation with")
sys = Param.System(Parent.any, "system object parameter")
num_squash_per_cycle = Param.Unsigned(2,
"Number of outstanding walks that can be squashed per cycle")
class ArmTLB(SimObject):
type = 'ArmTLB'
cxx_class = 'ArmISA::TLB'
cxx_header = "arch/arm/tlb.hh"
size = Param.Int(64, "TLB size")
walker = Param.ArmTableWalker(ArmTableWalker(), "HW Table walker")
| bsd-3-clause | 2,232,287,449,664,401,000 | 47.338983 | 77 | 0.768934 | false |
realsobek/freeipa | ipaclient/remote_plugins/2_114/sudocmdgroup.py | 8 | 14955 | #
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
# pylint: disable=unused-import
import six
from . import Command, Method, Object
from ipalib import api, parameters, output
from ipalib.parameters import DefaultFrom
from ipalib.plugable import Registry
from ipalib.text import _
from ipapython.dn import DN
from ipapython.dnsutil import DNSName
if six.PY3:
unicode = str
__doc__ = _("""
Groups of Sudo Commands
Manage groups of Sudo Commands.
EXAMPLES:
Add a new Sudo Command Group:
ipa sudocmdgroup-add --desc='administrators commands' admincmds
Remove a Sudo Command Group:
ipa sudocmdgroup-del admincmds
Manage Sudo Command Group membership, commands:
ipa sudocmdgroup-add-member --sudocmds=/usr/bin/less --sudocmds=/usr/bin/vim admincmds
Manage Sudo Command Group membership, commands:
 ipa sudocmdgroup-remove-member --sudocmds=/usr/bin/less admincmds
Show a Sudo Command Group:
 ipa sudocmdgroup-show admincmds
""")
register = Registry()
@register()
class sudocmdgroup(Object):
takes_params = (
parameters.Str(
'cn',
primary_key=True,
label=_(u'Sudo Command Group'),
),
parameters.Str(
'description',
required=False,
label=_(u'Description'),
doc=_(u'Group description'),
),
parameters.Str(
'membercmd_sudocmd',
required=False,
label=_(u'Commands'),
),
parameters.Str(
'membercmd_sudocmdgroup',
required=False,
label=_(u'Sudo Command Groups'),
),
parameters.Str(
'member_sudocmd',
required=False,
label=_(u'Member Sudo commands'),
),
)
@register()
class sudocmdgroup_add(Method):
__doc__ = _("Create new Sudo Command Group.")
takes_args = (
parameters.Str(
'cn',
cli_name='sudocmdgroup_name',
label=_(u'Sudo Command Group'),
no_convert=True,
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'Group description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class sudocmdgroup_add_member(Method):
__doc__ = _("Add members to Sudo Command Group.")
takes_args = (
parameters.Str(
'cn',
cli_name='sudocmdgroup_name',
label=_(u'Sudo Command Group'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Str(
'sudocmd',
required=False,
multivalue=True,
cli_name='sudocmds',
label=_(u'member sudo command'),
doc=_(u'sudo commands to add'),
alwaysask=True,
),
)
has_output = (
output.Entry(
'result',
),
output.Output(
'failed',
dict,
doc=_(u'Members that could not be added'),
),
output.Output(
'completed',
int,
doc=_(u'Number of members added'),
),
)
@register()
class sudocmdgroup_del(Method):
__doc__ = _("Delete Sudo Command Group.")
takes_args = (
parameters.Str(
'cn',
multivalue=True,
cli_name='sudocmdgroup_name',
label=_(u'Sudo Command Group'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'continue',
doc=_(u"Continuous mode: Don't stop on errors."),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Output(
'result',
dict,
doc=_(u'List of deletions that failed'),
),
output.ListOfPrimaryKeys(
'value',
),
)
@register()
class sudocmdgroup_find(Method):
__doc__ = _("Search for Sudo Command Groups.")
takes_args = (
parameters.Str(
'criteria',
required=False,
doc=_(u'A string searched in all relevant object attributes'),
),
)
takes_options = (
parameters.Str(
'cn',
required=False,
cli_name='sudocmdgroup_name',
label=_(u'Sudo Command Group'),
no_convert=True,
),
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'Group description'),
),
parameters.Int(
'timelimit',
required=False,
label=_(u'Time Limit'),
doc=_(u'Time limit of search in seconds'),
),
parameters.Int(
'sizelimit',
required=False,
label=_(u'Size Limit'),
doc=_(u'Maximum number of entries returned'),
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Flag(
'pkey_only',
required=False,
label=_(u'Primary key only'),
doc=_(u'Results should contain primary key attribute only ("sudocmdgroup-name")'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.ListOfEntries(
'result',
),
output.Output(
'count',
int,
doc=_(u'Number of entries returned'),
),
output.Output(
'truncated',
bool,
doc=_(u'True if not all results were returned'),
),
)
@register()
class sudocmdgroup_mod(Method):
__doc__ = _("Modify Sudo Command Group.")
takes_args = (
parameters.Str(
'cn',
cli_name='sudocmdgroup_name',
label=_(u'Sudo Command Group'),
no_convert=True,
),
)
takes_options = (
parameters.Str(
'description',
required=False,
cli_name='desc',
label=_(u'Description'),
doc=_(u'Group description'),
),
parameters.Str(
'setattr',
required=False,
multivalue=True,
doc=_(u'Set an attribute to a name/value pair. Format is attr=value.\nFor multi-valued attributes, the command replaces the values already present.'),
exclude=('webui',),
),
parameters.Str(
'addattr',
required=False,
multivalue=True,
doc=_(u'Add an attribute/value pair. Format is attr=value. The attribute\nmust be part of the schema.'),
exclude=('webui',),
),
parameters.Str(
'delattr',
required=False,
multivalue=True,
doc=_(u'Delete an attribute/value pair. The option will be evaluated\nlast, after all sets and adds.'),
exclude=('webui',),
),
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
@register()
class sudocmdgroup_remove_member(Method):
__doc__ = _("Remove members from Sudo Command Group.")
takes_args = (
parameters.Str(
'cn',
cli_name='sudocmdgroup_name',
label=_(u'Sudo Command Group'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
parameters.Str(
'sudocmd',
required=False,
multivalue=True,
cli_name='sudocmds',
label=_(u'member sudo command'),
doc=_(u'sudo commands to remove'),
alwaysask=True,
),
)
has_output = (
output.Entry(
'result',
),
output.Output(
'failed',
dict,
doc=_(u'Members that could not be removed'),
),
output.Output(
'completed',
int,
doc=_(u'Number of members removed'),
),
)
@register()
class sudocmdgroup_show(Method):
__doc__ = _("Display Sudo Command Group.")
takes_args = (
parameters.Str(
'cn',
cli_name='sudocmdgroup_name',
label=_(u'Sudo Command Group'),
no_convert=True,
),
)
takes_options = (
parameters.Flag(
'rights',
label=_(u'Rights'),
doc=_(u'Display the access rights of this entry (requires --all). See ipa man page for details.'),
default=False,
autofill=True,
),
parameters.Flag(
'all',
doc=_(u'Retrieve and print all attributes from the server. Affects command output.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'raw',
doc=_(u'Print entries as stored on the server. Only affects output format.'),
exclude=('webui',),
default=False,
autofill=True,
),
parameters.Flag(
'no_members',
doc=_(u'Suppress processing of membership attributes.'),
exclude=('webui', 'cli'),
default=False,
autofill=True,
),
)
has_output = (
output.Output(
'summary',
(unicode, type(None)),
doc=_(u'User-friendly description of action performed'),
),
output.Entry(
'result',
),
output.PrimaryKey(
'value',
doc=_(u"The primary_key value of the entry, e.g. 'jdoe' for a user"),
),
)
| gpl-3.0 | 2,376,423,638,030,372,400 | 26.694444 | 162 | 0.494751 | false |
SWENG500-Team1/FitnessForSplunk | fitness_for_splunk/bin/oauth2client/contrib/django_util/__init__.py | 9 | 11139 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the Django web framework
Provides Django views and helpers that make using the OAuth2 web server
flow easier. It includes an ``oauth_required`` decorator to automatically ensure
that user credentials are available, and an ``oauth_enabled`` decorator to check
if the user has authorized, and helper shortcuts to create the authorization
URL otherwise.
Only Django versions 1.8+ are supported.
Configuration
=============
To configure, you'll need a set of OAuth2 web application credentials from
`Google Developer's Console <https://console.developers.google.com/project/_/apiui/credential>`.
Add the helper to your INSTALLED_APPS:
.. code-block:: python
:caption: settings.py
:name: installed_apps
INSTALLED_APPS = (
# other apps
"oauth2client.contrib.django_util"
)
Add the client secrets created earlier to the settings. You can either
specify the path to the credentials file in JSON format
.. code-block:: python
:caption: settings.py
:name: secrets_file
GOOGLE_OAUTH2_CLIENT_SECRETS_JSON=/path/to/client-secret.json
Or, directly configure the client Id and client secret.
.. code-block:: python
:caption: settings.py
:name: secrets_config
GOOGLE_OAUTH2_CLIENT_ID=client-id-field
GOOGLE_OAUTH2_CLIENT_SECRET=client-secret-field
By default, the default scopes for the required decorator only contains the
``email`` scopes. You can change that default in the settings.
.. code-block:: python
:caption: settings.py
:name: scopes
GOOGLE_OAUTH2_SCOPES = ('email', 'https://www.googleapis.com/auth/calendar',)
By default, the decorators will add an `oauth` object to the Django request
object, and include all of its state and helpers inside that object. If the
`oauth` name conflicts with another usage, it can be changed
.. code-block:: python
:caption: settings.py
:name: request_prefix
# changes request.oauth to request.google_oauth
GOOGLE_OAUTH2_REQUEST_ATTRIBUTE = 'google_oauth'
Add the oauth2 routes to your application's urls.py urlpatterns.
.. code-block:: python
:caption: urls.py
:name: urls
from oauth2client.contrib.django_util.site import urls as oauth2_urls
urlpatterns += [url(r'^oauth2/', include(oauth2_urls))]
To require OAuth2 credentials for a view, use the `oauth2_required` decorator.
This creates a credentials object with an id_token, and allows you to create an
`http` object to build service clients with. These are all attached to the
request.oauth
.. code-block:: python
:caption: views.py
:name: views_required
from oauth2client.contrib.django_util.decorators import oauth_required
@oauth_required
def requires_default_scopes(request):
email = request.oauth.credentials.id_token['email']
service = build(serviceName='calendar', version='v3',
http=request.oauth.http,
developerKey=API_KEY)
events = service.events().list(calendarId='primary').execute()['items']
return HttpResponse("email: %s , calendar: %s" % (email, str(events)))
To make OAuth2 optional and provide an authorization link in your own views.
.. code-block:: python
:caption: views.py
:name: views_enabled2
from oauth2client.contrib.django_util.decorators import oauth_enabled
@oauth_enabled
def optional_oauth2(request):
if request.oauth.has_credentials():
# this could be passed into a view
# request.oauth.http is also initialized
return HttpResponse("User email: %s"
% request.oauth.credentials.id_token['email'])
else:
            return HttpResponse(
                'Here is an OAuth Authorize link: <a href="%s">Authorize</a>'
                % request.oauth.get_authorize_redirect())
If a view needs a scope not included in the default scopes specified in
the settings, you can use [incremental auth](https://developers.google.com/identity/sign-in/web/incremental-auth)
and specify additional scopes in the decorator arguments.
.. code-block:: python
:caption: views.py
:name: views_required_additional_scopes
@oauth_enabled(scopes=['https://www.googleapis.com/auth/drive'])
def drive_required(request):
if request.oauth.has_credentials():
service = build(serviceName='drive', version='v2',
http=request.oauth.http,
developerKey=API_KEY)
events = service.files().list().execute()['items']
return HttpResponse(str(events))
else:
            return HttpResponse(
                'Here is an OAuth Authorize link: <a href="%s">Authorize</a>'
                % request.oauth.get_authorize_redirect())
To provide a callback on authorization being completed, use the
oauth2_authorized signal:
.. code-block:: python
:caption: views.py
:name: signals
from oauth2client.contrib.django_util.signals import oauth2_authorized
def test_callback(sender, request, credentials, **kwargs):
print "Authorization Signal Received %s" % credentials.id_token['email']
oauth2_authorized.connect(test_callback)
"""
import django.conf
from django.core import exceptions
from django.core import urlresolvers
import httplib2
from oauth2client import clientsecrets
from oauth2client.contrib.django_util import storage
from six.moves.urllib import parse
GOOGLE_OAUTH2_DEFAULT_SCOPES = ('email',)
GOOGLE_OAUTH2_REQUEST_ATTRIBUTE = 'oauth'
def _load_client_secrets(filename):
"""Loads client secrets from the given filename."""
client_type, client_info = clientsecrets.loadfile(filename)
if client_type != clientsecrets.TYPE_WEB:
raise ValueError(
'The flow specified in {} is not supported, only the WEB flow '
'type is supported.'.format(client_type))
return client_info['client_id'], client_info['client_secret']
def _get_oauth2_client_id_and_secret(settings_instance):
"""Initializes client id and client secret based on the settings"""
secret_json = getattr(django.conf.settings,
'GOOGLE_OAUTH2_CLIENT_SECRETS_JSON', None)
if secret_json is not None:
return _load_client_secrets(secret_json)
else:
client_id = getattr(settings_instance, "GOOGLE_OAUTH2_CLIENT_ID",
None)
client_secret = getattr(settings_instance,
"GOOGLE_OAUTH2_CLIENT_SECRET", None)
if client_id is not None and client_secret is not None:
return client_id, client_secret
else:
raise exceptions.ImproperlyConfigured(
"Must specify either GOOGLE_OAUTH2_CLIENT_SECRETS_JSON, or "
" both GOOGLE_OAUTH2_CLIENT_ID and GOOGLE_OAUTH2_CLIENT_SECRET "
"in settings.py")
class OAuth2Settings(object):
"""Initializes Django OAuth2 Helper Settings
This class loads the OAuth2 Settings from the Django settings, and then
provides those settings as attributes to the rest of the views and
decorators in the module.
Attributes:
scopes: A list of OAuth2 scopes that the decorators and views will use
as defaults
request_prefix: The name of the attribute that the decorators use to
attach the UserOAuth2 object to the Django request object.
client_id: The OAuth2 Client ID
client_secret: The OAuth2 Client Secret
"""
def __init__(self, settings_instance):
self.scopes = getattr(settings_instance, 'GOOGLE_OAUTH2_SCOPES',
GOOGLE_OAUTH2_DEFAULT_SCOPES)
self.request_prefix = getattr(settings_instance,
'GOOGLE_OAUTH2_REQUEST_ATTRIBUTE',
GOOGLE_OAUTH2_REQUEST_ATTRIBUTE)
self.client_id, self.client_secret = \
_get_oauth2_client_id_and_secret(settings_instance)
if ('django.contrib.sessions.middleware.SessionMiddleware'
not in settings_instance.MIDDLEWARE_CLASSES):
raise exceptions.ImproperlyConfigured(
"The Google OAuth2 Helper requires session middleware to "
"be installed. Edit your MIDDLEWARE_CLASSES setting"
" to include 'django.contrib.sessions.middleware."
"SessionMiddleware'.")
oauth2_settings = OAuth2Settings(django.conf.settings)
def _redirect_with_params(url_name, *args, **kwargs):
"""Helper method to create a redirect response that uses GET URL
parameters."""
url = urlresolvers.reverse(url_name, args=args)
params = parse.urlencode(kwargs, True)
return "{0}?{1}".format(url, params)
class UserOAuth2(object):
"""Class to create oauth2 objects on Django request objects containing
credentials and helper methods.
"""
def __init__(self, request, scopes=None, return_url=None):
"""Initialize the Oauth2 Object
:param request: Django request object
:param scopes: Scopes desired for this OAuth2 flow
:param return_url: URL to return to after authorization is complete
:return:
"""
self.request = request
self.return_url = return_url or request.get_full_path()
self.scopes = set(oauth2_settings.scopes)
if scopes:
self.scopes |= set(scopes)
# make sure previously requested custom scopes are maintained
# in future authorizations
credentials = storage.get_storage(self.request).get()
if credentials:
self.scopes |= credentials.scopes
def get_authorize_redirect(self):
"""Creates a URl to start the OAuth2 authorization flow"""
get_params = {
'return_url': self.return_url,
'scopes': self.scopes
}
return _redirect_with_params('google_oauth:authorize',
**get_params)
def has_credentials(self):
"""Returns True if there are valid credentials for the current user
and required scopes."""
return (self.credentials and not self.credentials.invalid
and self.credentials.has_scopes(self.scopes))
@property
def credentials(self):
"""Gets the authorized credentials for this flow, if they exist"""
return storage.get_storage(self.request).get()
@property
def http(self):
"""Helper method to create an HTTP client authorized with OAuth2
credentials"""
if self.has_credentials():
return self.credentials.authorize(httplib2.Http())
return None
| mit | 4,711,334,333,327,672,000 | 35.283388 | 113 | 0.679235 | false |
MattWellie/PAGE_MPO | match_and_filter.py | 1 | 1983 | # Scratchy script to rematch all the variants I can find in the VCFs with the
# corresponding PP numbers, and peel out all the annotations present
import re
allele_freq = re.compile(r";(?P<AF>[A-Z_35]*?_AF)=(?P<value>0\..*?);")
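# e.g. an INFO field such as ";EAS_AF=0.0014;" yields the pair
# ('EAS_AF', '0.0014') via re.findall (the population tag is illustrative)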
filtered_in = 'filtered_variant_results.txt'
unfiltered_in = 'all_variant_results.txt'
details_in = 'pilot_out.txt'
detail_dict = {}
# Create an index to get at the PP#### values using positions
with open(details_in, 'r') as handle:
for line in handle:
        fields = line.split('\t')  # renamed from 'list' to avoid shadowing the builtin
        if fields[0] == 'proband': continue
        else:
            PP = fields[0]
            pos = fields[4]
            chrom = fields[3]
            gene = fields[5]
detail_dict['{}:{}'.format(chrom, pos)] = {'pp': PP,
'gene': gene}
allele_freqs = {}
with open(filtered_in, 'r') as handle:
for line in handle:
        fields = line.split('\t')  # renamed from 'list' to avoid shadowing the builtin
        # Get PP#### details
        chrom = fields[0]
        pos = fields[1]
chrompos = '{}:{}'.format(chrom, pos)
PP = detail_dict[chrompos]['pp']
gene = detail_dict[chrompos]['gene']
# Get allele frequencies
groups = re.findall(allele_freq, line)
if len(groups) >= 1:
            # Create the per-proband dict on first sight; the original
            # 'continue' here skipped every variant after the first for a
            # given PP, which contradicts the multi-variant output loop below
            if PP not in allele_freqs:
                allele_freqs[PP] = {}
allele_freqs[PP][chrompos] = {'gene': gene,
'afs': []}
for group in groups:
allele_freqs[PP][chrompos]['afs'].append('{} = {}'.format(group[0], group[1]))
with open('allele_freqs.txt', 'w') as handle:
print >>handle, 'PP IDs, variants, and corresponding allele freqs\n'
for PP in allele_freqs:
print >>handle, '\n{}'.format(PP)
for chrompos in allele_freqs[PP]:
gene = allele_freqs[PP][chrompos]['gene']
print >>handle, '\t{} - {}'.format(chrompos, gene)
for af in allele_freqs[PP][chrompos]['afs']:
print >>handle, '\t\t{}'.format(af) | apache-2.0 | -8,445,689,372,967,776,000 | 34.428571 | 94 | 0.543116 | false |
h4ck3rm1k3/ansible | v2/ansible/module_utils/facts.py | 1 | 106530 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import stat
import array
import errno
import fcntl
import fnmatch
import glob
import platform
import re
import signal
import socket
import struct
import datetime
import getpass
import ConfigParser
import StringIO
from string import maketrans
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import json
except ImportError:
import simplejson as json
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
class TimeoutError(Exception):
pass
def timeout(seconds=10, error_message="Timer expired"):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
return decorator
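# Example usage (illustrative only; the method name and limit are hypothetical):
#
#     @timeout(5, "fact gathering timed out")
#     def get_mount_facts(self):
#         ...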
# --------------------------------------------------------------
class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic should
subclass Facts.
"""
# i86pc is a Solaris and derivatives-ism
_I386RE = re.compile(r'i([3456]86|86pc)')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
OSDIST_LIST = ( ('/etc/redhat-release', 'RedHat'),
('/etc/vmware-release', 'VMwareESX'),
('/etc/openwrt_release', 'OpenWrt'),
('/etc/system-release', 'OtherLinux'),
('/etc/alpine-release', 'Alpine'),
('/etc/release', 'Solaris'),
('/etc/arch-release', 'Archlinux'),
('/etc/SuSE-release', 'SuSE'),
('/etc/os-release', 'SuSE'),
('/etc/gentoo-release', 'Gentoo'),
('/etc/os-release', 'Debian'),
('/etc/lsb-release', 'Mandriva') )
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
# package manager, put the preferred one last. If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
{ 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
{ 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
{ 'path' : '/usr/bin/emerge', 'name' : 'portage' },
{ 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
{ 'path' : '/usr/bin/pkg', 'name' : 'pkg' },
]
def __init__(self):
self.facts = {}
self.get_platform_facts()
self.get_distribution_facts()
self.get_cmdline()
self.get_public_ssh_host_keys()
self.get_selinux_facts()
self.get_fips_facts()
self.get_pkg_mgr_facts()
self.get_lsb_facts()
self.get_date_time_facts()
self.get_user_facts()
self.get_local_facts()
self.get_env_facts()
def populate(self):
return self.facts
# Platform
# platform.system() can be Linux, Darwin, Java, or Windows
def get_platform_facts(self):
self.facts['system'] = platform.system()
self.facts['kernel'] = platform.release()
self.facts['machine'] = platform.machine()
self.facts['python_version'] = platform.python_version()
self.facts['fqdn'] = socket.getfqdn()
self.facts['hostname'] = platform.node().split('.')[0]
self.facts['nodename'] = platform.node()
self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
arch_bits = platform.architecture()[0]
self.facts['userspace_bits'] = arch_bits.replace('bit', '')
if self.facts['machine'] == 'x86_64':
self.facts['architecture'] = self.facts['machine']
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
elif Facts._I386RE.search(self.facts['machine']):
self.facts['architecture'] = 'i386'
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
else:
self.facts['architecture'] = self.facts['machine']
if self.facts['system'] == 'Linux':
self.get_distribution_facts()
elif self.facts['system'] == 'AIX':
rc, out, err = module.run_command("/usr/sbin/bootinfo -p")
data = out.split('\n')
self.facts['architecture'] = data[0]
def get_local_facts(self):
fact_path = module.params.get('fact_path', None)
if not fact_path or not os.path.exists(fact_path):
return
local = {}
for fn in sorted(glob.glob(fact_path + '/*.fact')):
# where it will sit under local facts
fact_base = os.path.basename(fn).replace('.fact','')
if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
# run it
# try to read it as json first
# if that fails read it with ConfigParser
# if that fails, skip it
rc, out, err = module.run_command(fn)
else:
out = open(fn).read()
# load raw json
fact = 'loading %s' % fact_base
try:
fact = json.loads(out)
except ValueError, e:
# load raw ini
cp = ConfigParser.ConfigParser()
try:
cp.readfp(StringIO.StringIO(out))
except ConfigParser.Error, e:
fact="error loading fact - please check content"
else:
fact = {}
#print cp.sections()
for sect in cp.sections():
if sect not in fact:
fact[sect] = {}
for opt in cp.options(sect):
val = cp.get(sect, opt)
fact[sect][opt]=val
local[fact_base] = fact
if not local:
return
self.facts['local'] = local
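    # Illustrative sketch (an assumption, not from the original source): a
    # fact file under fact_path, e.g. 'example.fact', may be JSON or INI.
    # Non-executable files are read directly; executable files are run and
    # their stdout is parsed the same way:
    #
    #   JSON form:  {"role": "webserver", "tier": 2}
    #   INI form:   [general]
    #               role = webserver
    #               tier = 2
    #
    # Either form would typically surface under the 'local' facts, e.g.
    # ansible_local['example'].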
# platform.dist() is deprecated in 2.6
# in 2.6 and newer, you should use platform.linux_distribution()
def get_distribution_facts(self):
# A list with OS Family members
OS_FAMILY = dict(
RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', SLES = 'Suse',
SLED = 'Suse', OpenSuSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
Archlinux = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX'
)
# TODO: Rewrite this to use the function references in a dict pattern
# as it's much cleaner than this massive if-else
if self.facts['system'] == 'AIX':
self.facts['distribution'] = 'AIX'
rc, out, err = module.run_command("/usr/bin/oslevel")
data = out.split('.')
self.facts['distribution_version'] = data[0]
self.facts['distribution_release'] = data[1]
elif self.facts['system'] == 'HP-UX':
self.facts['distribution'] = 'HP-UX'
rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
if data:
self.facts['distribution_version'] = data.groups()[0]
self.facts['distribution_release'] = data.groups()[1]
elif self.facts['system'] == 'Darwin':
self.facts['distribution'] = 'MacOSX'
rc, out, err = module.run_command("/usr/bin/sw_vers -productVersion")
data = out.split()[-1]
self.facts['distribution_version'] = data
elif self.facts['system'] == 'FreeBSD':
self.facts['distribution'] = 'FreeBSD'
self.facts['distribution_release'] = platform.release()
self.facts['distribution_version'] = platform.version()
elif self.facts['system'] == 'OpenBSD':
self.facts['distribution'] = 'OpenBSD'
self.facts['distribution_release'] = platform.release()
rc, out, err = module.run_command("/sbin/sysctl -n kern.version")
match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
if match:
self.facts['distribution_version'] = match.groups()[0]
else:
self.facts['distribution_version'] = 'release'
else:
dist = platform.dist()
self.facts['distribution'] = dist[0].capitalize() or 'NA'
self.facts['distribution_version'] = dist[1] or 'NA'
self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA'
self.facts['distribution_release'] = dist[2] or 'NA'
# Try to handle the exceptions now ...
for (path, name) in Facts.OSDIST_LIST:
if os.path.exists(path):
if os.path.getsize(path) > 0:
if self.facts['distribution'] in ('Fedora', ):
# Once we determine the value is one of these distros
# we trust the values are always correct
break
elif name == 'RedHat':
data = get_file_content(path)
if 'Red Hat' in data:
self.facts['distribution'] = name
else:
self.facts['distribution'] = data.split()[0]
break
elif name == 'OtherLinux':
data = get_file_content(path)
if 'Amazon' in data:
self.facts['distribution'] = 'Amazon'
self.facts['distribution_version'] = data.split()[-1]
break
elif name == 'OpenWrt':
data = get_file_content(path)
if 'OpenWrt' in data:
self.facts['distribution'] = name
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
break
elif name == 'Alpine':
data = get_file_content(path)
self.facts['distribution'] = name
self.facts['distribution_version'] = data
break
elif name == 'Solaris':
data = get_file_content(path).split('\n')[0]
if 'Solaris' in data:
ora_prefix = ''
if 'Oracle Solaris' in data:
data = data.replace('Oracle ','')
ora_prefix = 'Oracle '
self.facts['distribution'] = data.split()[0]
self.facts['distribution_version'] = data.split()[1]
self.facts['distribution_release'] = ora_prefix + data
break
uname_rc, uname_out, uname_err = module.run_command(['uname', '-v'])
distribution_version = None
if 'SmartOS' in data:
self.facts['distribution'] = 'SmartOS'
if os.path.exists('/etc/product'):
product_data = dict([l.split(': ', 1) for l in get_file_content('/etc/product').split('\n') if ': ' in l])
if 'Image' in product_data:
distribution_version = product_data.get('Image').split()[-1]
elif 'OpenIndiana' in data:
self.facts['distribution'] = 'OpenIndiana'
elif 'OmniOS' in data:
self.facts['distribution'] = 'OmniOS'
distribution_version = data.split()[-1]
elif uname_rc == 0 and 'NexentaOS_' in uname_out:
self.facts['distribution'] = 'Nexenta'
distribution_version = data.split()[-1].lstrip('v')
if self.facts['distribution'] in ('SmartOS', 'OpenIndiana', 'OmniOS', 'Nexenta'):
self.facts['distribution_release'] = data.strip()
if distribution_version is not None:
self.facts['distribution_version'] = distribution_version
elif uname_rc == 0:
self.facts['distribution_version'] = uname_out.split('\n')[0].strip()
break
elif name == 'SuSE':
data = get_file_content(path)
if 'suse' in data.lower():
if path == '/etc/os-release':
release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
distdata = get_file_content(path).split('\n')[0]
self.facts['distribution'] = distdata.split('=')[1]
if release:
self.facts['distribution_release'] = release.groups()[0]
break
elif path == '/etc/SuSE-release':
data = data.splitlines()
distdata = get_file_content(path).split('\n')[0]
self.facts['distribution'] = distdata.split()[0]
for line in data:
release = re.search('CODENAME *= *([^\n]+)', line)
if release:
self.facts['distribution_release'] = release.groups()[0].strip()
break
elif name == 'Debian':
data = get_file_content(path)
if 'Debian' in data:
release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
self.facts['distribution_release'] = release.groups()[0]
break
elif name == 'Mandriva':
data = get_file_content(path)
if 'Mandriva' in data:
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
self.facts['distribution'] = name
break
else:
self.facts['distribution'] = name
self.facts['os_family'] = self.facts['distribution']
if self.facts['distribution'] in OS_FAMILY:
self.facts['os_family'] = OS_FAMILY[self.facts['distribution']]
def get_cmdline(self):
data = get_file_content('/proc/cmdline')
if data:
self.facts['cmdline'] = {}
try:
for piece in shlex.split(data):
item = piece.split('=', 1)
if len(item) == 1:
self.facts['cmdline'][item[0]] = True
else:
self.facts['cmdline'][item[0]] = item[1]
except ValueError, e:
pass
def get_public_ssh_host_keys(self):
dsa_filename = '/etc/ssh/ssh_host_dsa_key.pub'
rsa_filename = '/etc/ssh/ssh_host_rsa_key.pub'
ecdsa_filename = '/etc/ssh/ssh_host_ecdsa_key.pub'
if self.facts['system'] == 'Darwin':
dsa_filename = '/etc/ssh_host_dsa_key.pub'
rsa_filename = '/etc/ssh_host_rsa_key.pub'
ecdsa_filename = '/etc/ssh_host_ecdsa_key.pub'
dsa = get_file_content(dsa_filename)
rsa = get_file_content(rsa_filename)
ecdsa = get_file_content(ecdsa_filename)
if dsa is None:
dsa = 'NA'
else:
self.facts['ssh_host_key_dsa_public'] = dsa.split()[1]
if rsa is None:
rsa = 'NA'
else:
self.facts['ssh_host_key_rsa_public'] = rsa.split()[1]
if ecdsa is None:
ecdsa = 'NA'
else:
self.facts['ssh_host_key_ecdsa_public'] = ecdsa.split()[1]
def get_pkg_mgr_facts(self):
self.facts['pkg_mgr'] = 'unknown'
for pkg in Facts.PKG_MGRS:
if os.path.exists(pkg['path']):
self.facts['pkg_mgr'] = pkg['name']
if self.facts['system'] == 'OpenBSD':
self.facts['pkg_mgr'] = 'openbsd_pkg'
def get_lsb_facts(self):
lsb_path = module.get_bin_path('lsb_release')
if lsb_path:
rc, out, err = module.run_command([lsb_path, "-a"])
if rc == 0:
self.facts['lsb'] = {}
for line in out.split('\n'):
if len(line) < 1:
continue
value = line.split(':', 1)[1].strip()
if 'LSB Version:' in line:
self.facts['lsb']['release'] = value
elif 'Distributor ID:' in line:
self.facts['lsb']['id'] = value
elif 'Description:' in line:
self.facts['lsb']['description'] = value
elif 'Release:' in line:
self.facts['lsb']['release'] = value
elif 'Codename:' in line:
self.facts['lsb']['codename'] = value
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
elif lsb_path is None and os.path.exists('/etc/lsb-release'):
self.facts['lsb'] = {}
f = open('/etc/lsb-release', 'r')
try:
for line in f.readlines():
value = line.split('=',1)[1].strip()
if 'DISTRIB_ID' in line:
self.facts['lsb']['id'] = value
elif 'DISTRIB_RELEASE' in line:
self.facts['lsb']['release'] = value
elif 'DISTRIB_DESCRIPTION' in line:
self.facts['lsb']['description'] = value
elif 'DISTRIB_CODENAME' in line:
self.facts['lsb']['codename'] = value
finally:
f.close()
else:
return self.facts
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
def get_selinux_facts(self):
if not HAVE_SELINUX:
self.facts['selinux'] = False
return
self.facts['selinux'] = {}
if not selinux.is_selinux_enabled():
self.facts['selinux']['status'] = 'disabled'
else:
self.facts['selinux']['status'] = 'enabled'
try:
self.facts['selinux']['policyvers'] = selinux.security_policyvers()
except OSError, e:
self.facts['selinux']['policyvers'] = 'unknown'
try:
(rc, configmode) = selinux.selinux_getenforcemode()
if rc == 0:
self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
else:
self.facts['selinux']['config_mode'] = 'unknown'
except OSError, e:
self.facts['selinux']['config_mode'] = 'unknown'
try:
mode = selinux.security_getenforce()
self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
except OSError, e:
self.facts['selinux']['mode'] = 'unknown'
try:
(rc, policytype) = selinux.selinux_getpolicytype()
if rc == 0:
self.facts['selinux']['type'] = policytype
else:
self.facts['selinux']['type'] = 'unknown'
except OSError, e:
self.facts['selinux']['type'] = 'unknown'
def get_fips_facts(self):
self.facts['fips'] = False
data = get_file_content('/proc/sys/crypto/fips_enabled')
if data and data == '1':
self.facts['fips'] = True
def get_date_time_facts(self):
self.facts['date_time'] = {}
now = datetime.datetime.now()
self.facts['date_time']['year'] = now.strftime('%Y')
self.facts['date_time']['month'] = now.strftime('%m')
self.facts['date_time']['weekday'] = now.strftime('%A')
self.facts['date_time']['day'] = now.strftime('%d')
self.facts['date_time']['hour'] = now.strftime('%H')
self.facts['date_time']['minute'] = now.strftime('%M')
self.facts['date_time']['second'] = now.strftime('%S')
self.facts['date_time']['epoch'] = now.strftime('%s')
if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
self.facts['date_time']['epoch'] = str(int(time.time()))
self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
self.facts['date_time']['tz'] = time.strftime("%Z")
self.facts['date_time']['tz_offset'] = time.strftime("%z")
# User
def get_user_facts(self):
self.facts['user_id'] = getpass.getuser()
def get_env_facts(self):
self.facts['env'] = {}
for k,v in os.environ.iteritems():
self.facts['env'][k] = v
class Hardware(Facts):
"""
This is a generic Hardware subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this, it
should define:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
All subclasses MUST define platform.
"""
platform = 'Generic'
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Hardware.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self):
Facts.__init__(self)
def populate(self):
return self.facts
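# Illustrative sketch (not part of the original module): the __new__ hook in
# Hardware dispatches on platform.system(), so instantiating the base class
# yields the matching subclass when one exists. Assuming the module-level
# AnsibleModule global is set up:
#
#   hw = Hardware()          # a LinuxHardware instance on a Linux host
#   facts = hw.populate()    # the subclass fills in memory/CPU/device facts
#
# The Network hierarchy below follows the same pattern.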
class LinuxHardware(Hardware):
"""
Linux-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
In addition, it also defines number of DMI facts and device facts.
"""
platform = 'Linux'
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
for line in open("/proc/meminfo").readlines():
data = line.split(":", 1)
key = data[0]
if key in LinuxHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = long(val) / 1024
def get_cpu_facts(self):
i = 0
vendor_id_occurrence = 0
model_name_occurrence = 0
physid = 0
coreid = 0
sockets = {}
cores = {}
xen = False
xen_paravirt = False
try:
if os.path.exists('/proc/xen'):
xen = True
elif open('/sys/hypervisor/type').readline().strip() == 'xen':
xen = True
except IOError:
pass
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in open("/proc/cpuinfo").readlines():
data = line.split(":", 1)
key = data[0].strip()
if xen:
if key == 'flags':
# Check for vme cpu flag, Xen paravirt does not expose this.
# Need to detect Xen paravirt because it exposes cpuinfo
# differently than Xen HVM or KVM and causes reporting of
# only a single cpu core.
                    if 'vme' not in data[1]:  # data[1] holds the flags string
xen_paravirt = True
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key == 'model name' or key == 'Processor' or key == 'vendor_id':
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
if key == 'vendor_id':
vendor_id_occurrence += 1
if key == 'model name':
model_name_occurrence += 1
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'core id':
coreid = data[1].strip()
if coreid not in sockets:
cores[coreid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
elif key == 'siblings':
cores[coreid] = int(data[1].strip())
elif key == '# processors':
self.facts['processor_cores'] = int(data[1].strip())
if vendor_id_occurrence == model_name_occurrence:
i = vendor_id_occurrence
if self.facts['architecture'] != 's390x':
if xen_paravirt:
self.facts['processor_count'] = i
self.facts['processor_cores'] = i
self.facts['processor_threads_per_core'] = 1
self.facts['processor_vcpus'] = i
else:
self.facts['processor_count'] = sockets and len(sockets) or i
self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1
self.facts['processor_threads_per_core'] = ((cores.values() and
cores.values()[0] or 1) / self.facts['processor_cores'])
self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
self.facts['processor_count'] * self.facts['processor_cores'])
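    # Illustrative sketch (assumed /proc/cpuinfo excerpt, not from the
    # source): the parser above keys off lines such as
    #
    #   processor       : 0
    #   vendor_id       : GenuineIntel
    #   model name      : Intel(R) Xeon(R) CPU E5-2670 0 @ 2.60GHz
    #   physical id     : 0
    #   core id         : 0
    #   cpu cores       : 8
    #   siblings        : 16
    #
    # so 'sockets' maps physical id -> cpu cores and 'cores' maps core id
    # -> siblings, giving processor_threads_per_core = 16 / 8 = 2 and
    # processor_vcpus = 2 * 1 * 8 = 16 for this hypothetical host.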
def get_dmi_facts(self):
''' learn dmi facts from system
Try /sys first for dmi related facts.
If that is not available, fall back to dmidecode executable '''
if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
# Use kernel DMI info, if available
# DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
"Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
"Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
"All In One", "Sub Notebook", "Space-saving", "Lunch Box",
"Main Server Chassis", "Expansion Chassis", "Sub Chassis",
"Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
"Rack Mount Chassis", "Sealed-case PC", "Multi-system",
"CompactPCI", "AdvancedTCA", "Blade" ]
DMI_DICT = {
'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
'product_name': '/sys/devices/virtual/dmi/id/product_name',
'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
'product_version': '/sys/devices/virtual/dmi/id/product_version',
'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
}
for (key,path) in DMI_DICT.items():
data = get_file_content(path)
if data is not None:
if key == 'form_factor':
try:
self.facts['form_factor'] = FORM_FACTOR[int(data)]
except IndexError, e:
self.facts['form_factor'] = 'unknown (%s)' % data
else:
self.facts[key] = data
else:
self.facts[key] = 'NA'
else:
# Fall back to using dmidecode, if available
dmi_bin = module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_version': 'bios-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer'
}
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
try:
json.dumps(thisvalue)
except UnicodeDecodeError:
thisvalue = "NA"
self.facts[k] = thisvalue
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
mtab = get_file_content('/etc/mtab', '')
for line in mtab.split('\n'):
if line.startswith('/'):
fields = line.rstrip('\n').split()
                if fields[2] != 'none':
size_total = None
size_available = None
try:
statvfs_result = os.statvfs(fields[1])
size_total = statvfs_result.f_bsize * statvfs_result.f_blocks
size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail)
except OSError, e:
continue
self.facts['mounts'].append(
{'mount': fields[1],
'device':fields[0],
'fstype': fields[2],
'options': fields[3],
# statvfs data
'size_total': size_total,
'size_available': size_available,
})
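    # Illustrative sketch (hypothetical /etc/mtab line, not from the source):
    #
    #   /dev/sda1 / ext4 rw,relatime 0 0
    #
    # yields a mounts entry of the form
    #   {'mount': '/', 'device': '/dev/sda1', 'fstype': 'ext4',
    #    'options': 'rw,relatime', 'size_total': ..., 'size_available': ...}
    # with the two sizes computed from os.statvfs('/').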
def get_device_facts(self):
self.facts['devices'] = {}
lspci = module.get_bin_path('lspci')
if lspci:
rc, pcidata, err = module.run_command([lspci, '-D'])
else:
pcidata = None
try:
block_devs = os.listdir("/sys/block")
except OSError:
return
for block in block_devs:
virtual = 1
sysfs_no_links = 0
try:
path = os.readlink(os.path.join("/sys/block/", block))
except OSError, e:
if e.errno == errno.EINVAL:
path = block
sysfs_no_links = 1
else:
continue
if "virtual" in path:
continue
sysdir = os.path.join("/sys/block", path)
if sysfs_no_links == 1:
for folder in os.listdir(sysdir):
if "device" in folder:
virtual = 0
break
if virtual:
continue
d = {}
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model']:
d[key] = get_file_content(sysdir + "/device/" + key)
for key,test in [ ('removable','/removable'), \
('support_discard','/queue/discard_granularity'),
]:
d[key] = get_file_content(sysdir + test)
d['partitions'] = {}
for folder in os.listdir(sysdir):
m = re.search("(" + diskname + "\d+)", folder)
if m:
part = {}
partname = m.group(1)
part_sysdir = sysdir + "/" + partname
part['start'] = get_file_content(part_sysdir + "/start",0)
part['sectors'] = get_file_content(part_sysdir + "/size",0)
part['sectorsize'] = get_file_content(part_sysdir + "/queue/physical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
d['partitions'][partname] = part
d['rotational'] = get_file_content(sysdir + "/queue/rotational")
d['scheduler_mode'] = ""
scheduler = get_file_content(sysdir + "/queue/scheduler")
if scheduler is not None:
m = re.match(".*?(\[(.*)\])", scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/physical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
d['host'] = ""
# domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
if m and pcidata:
pciid = m.group(1)
did = re.escape(pciid)
                m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
                if m:
                    d['host'] = m.group(1)
d['holders'] = []
if os.path.isdir(sysdir + "/holders"):
for folder in os.listdir(sysdir + "/holders"):
if not folder.startswith("dm-"):
continue
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
if name:
d['holders'].append(name)
else:
d['holders'].append(folder)
self.facts['devices'][diskname] = d
class SunOSHardware(Hardware):
"""
In addition to the generic memory and cpu facts, this also sets
swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
"""
platform = 'SunOS'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
return self.facts
def get_cpu_facts(self):
physid = 0
sockets = {}
rc, out, err = module.run_command("/usr/bin/kstat cpu_info")
self.facts['processor'] = []
for line in out.split('\n'):
if len(line) < 1:
continue
data = line.split(None, 1)
key = data[0].strip()
# "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
if key == 'module:':
brand = ''
elif key == 'brand':
brand = data[1].strip()
elif key == 'clock_MHz':
clock_mhz = data[1].strip()
elif key == 'implementation':
processor = brand or data[1].strip()
# Add clock speed to description for SPARC CPU
if self.facts['machine'] != 'i86pc':
processor += " @ " + clock_mhz + "MHz"
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(processor)
elif key == 'chip_id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
else:
sockets[physid] += 1
# Counting cores on Solaris can be complicated.
# https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
# Treat 'processor_count' as physical sockets and 'processor_cores' as
        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC as
# these processors have: sockets -> cores -> threads/virtual CPU.
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_cores'] = 'NA'
self.facts['processor_count'] = len(self.facts['processor'])
def get_memory_facts(self):
rc, out, err = module.run_command(["/usr/sbin/prtconf"])
for line in out.split('\n'):
if 'Memory size' in line:
self.facts['memtotal_mb'] = line.split()[2]
rc, out, err = module.run_command("/usr/sbin/swap -s")
allocated = long(out.split()[1][:-1])
reserved = long(out.split()[5][:-1])
used = long(out.split()[8][:-1])
free = long(out.split()[10][:-1])
self.facts['swapfree_mb'] = free / 1024
self.facts['swaptotal_mb'] = (free + used) / 1024
self.facts['swap_allocated_mb'] = allocated / 1024
self.facts['swap_reserved_mb'] = reserved / 1024
class OpenBSDHardware(Hardware):
"""
OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- processor_speed
- devices
"""
platform = 'OpenBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.sysctl = self.get_sysctl()
self.get_memory_facts()
self.get_processor_facts()
self.get_device_facts()
return self.facts
def get_sysctl(self):
rc, out, err = module.run_command(["/sbin/sysctl", "hw"])
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
            (key, value) = line.split('=', 1)
sysctl[key] = value.strip()
return sysctl
def get_memory_facts(self):
# Get free memory. vmstat output looks like:
# procs memory page disks traps cpu
# r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
# 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
rc, out, err = module.run_command("/usr/bin/vmstat")
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[4]) / 1024
self.facts['memtotal_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024
# Get swapctl info. swapctl output looks like:
# total: 69268 1K-blocks allocated, 0 used, 69268 available
# And for older OpenBSD:
# total: 69268k bytes allocated = 0k used, 69268k available
rc, out, err = module.run_command("/sbin/swapctl -sk")
if rc == 0:
swaptrans = maketrans(' ', ' ')
data = out.split()
self.facts['swapfree_mb'] = long(data[-2].translate(swaptrans, "kmg")) / 1024
self.facts['swaptotal_mb'] = long(data[1].translate(swaptrans, "kmg")) / 1024
def get_processor_facts(self):
processor = []
dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
i = 0
for line in dmesg_boot.splitlines():
if line.split(' ', 1)[0] == 'cpu%i:' % i:
processor.append(line.split(' ', 1)[1])
i = i + 1
processor_count = i
self.facts['processor'] = processor
self.facts['processor_count'] = processor_count
# I found no way to figure out the number of Cores per CPU in OpenBSD
self.facts['processor_cores'] = 'NA'
def get_device_facts(self):
devices = []
devices.extend(self.sysctl['hw.disknames'].split(','))
self.facts['devices'] = devices
class FreeBSDHardware(Hardware):
"""
FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'FreeBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = module.run_command("/sbin/sysctl -n hw.ncpu")
self.facts['processor_count'] = out.strip()
dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
for line in dmesg_boot.split('\n'):
if 'CPU:' in line:
cpu = re.sub(r'CPU:\s+', r"", line)
self.facts['processor'].append(cpu.strip())
if 'Logical CPUs per core' in line:
self.facts['processor_cores'] = line.split()[4]
def get_memory_facts(self):
rc, out, err = module.run_command("/sbin/sysctl vm.stats")
for line in out.split('\n'):
data = line.split()
if 'vm.stats.vm.v_page_size' in line:
pagesize = long(data[1])
if 'vm.stats.vm.v_page_count' in line:
pagecount = long(data[1])
if 'vm.stats.vm.v_free_count' in line:
freecount = long(data[1])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = module.run_command("/usr/sbin/swapinfo -m")
lines = out.split('\n')
if len(lines[-1]) == 0:
lines.pop()
data = lines[-1].split()
self.facts['swaptotal_mb'] = data[1]
self.facts['swapfree_mb'] = data[3]
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
def get_device_facts(self):
sysdir = '/dev'
self.facts['devices'] = {}
drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = module.run_command("/sbin/sysctl kern.disks")
slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
for device in dirlist:
d = drives.match(device)
if d:
self.facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
self.facts['devices'][d.group(1)].append(s.group(1))
def get_dmi_facts(self):
''' learn dmi facts from system
Use dmidecode executable if available'''
# Fall back to using dmidecode, if available
dmi_bin = module.get_bin_path('dmidecode')
DMI_DICT = dict(
bios_date='bios-release-date',
bios_version='bios-version',
form_factor='chassis-type',
product_name='system-product-name',
product_serial='system-serial-number',
product_uuid='system-uuid',
product_version='system-version',
system_vendor='system-manufacturer'
)
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
try:
json.dumps(self.facts[k])
except UnicodeDecodeError:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
class NetBSDHardware(Hardware):
"""
NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'NetBSD'
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
i = 0
physid = 0
sockets = {}
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in open("/proc/cpuinfo").readlines():
data = line.split(":", 1)
key = data[0].strip()
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key == 'model name' or key == 'Processor':
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_count'] = i
self.facts['processor_cores'] = 'NA'
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
for line in open("/proc/meminfo").readlines():
data = line.split(":", 1)
key = data[0]
if key in NetBSDHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = long(val) / 1024
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
class AIX(Hardware):
"""
AIX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
"""
platform = 'AIX'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = module.run_command("/usr/sbin/lsdev -Cc processor")
if out:
i = 0
for line in out.split('\n'):
if 'Available' in line:
if i == 0:
data = line.split(' ')
cpudev = data[0]
i += 1
self.facts['processor_count'] = int(i)
rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
data = out.split(' ')
self.facts['processor'] = data[1]
rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
data = out.split(' ')
self.facts['processor_cores'] = int(data[1])
def get_memory_facts(self):
pagesize = 4096
rc, out, err = module.run_command("/usr/bin/vmstat -v")
for line in out.split('\n'):
data = line.split()
if 'memory pages' in line:
pagecount = long(data[0])
if 'free pages' in line:
freecount = long(data[0])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
        # Get swap info from lsps. lsps -s output looks like:
        # Total Paging Space   Percent Used
        #       512MB               1%
        #
rc, out, err = module.run_command("/usr/sbin/lsps -s")
if out:
lines = out.split('\n')
data = lines[1].split()
swaptotal_mb = long(data[0].rstrip('MB'))
percused = int(data[1].rstrip('%'))
self.facts['swaptotal_mb'] = swaptotal_mb
self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100)
def get_dmi_facts(self):
rc, out, err = module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
data = out.split()
self.facts['firmware_version'] = data[1].strip('IBM,')
class HPUX(Hardware):
"""
    HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor
- processor_cores
- processor_count
- model
- firmware
"""
platform = 'HP-UX'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_hw_facts()
return self.facts
def get_cpu_facts(self):
if self.facts['architecture'] == '9000/800':
rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip())
#Working with machinfo mess
elif self.facts['architecture'] == 'ia64':
if self.facts['distribution_version'] == "B.11.23":
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split('=')[1])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip())
if self.facts['distribution_version'] == "B.11.31":
#if machinfo return cores strings release B.11.31 > 1204
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
if out.strip()== '0':
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
#If hyperthreading is active divide cores by 2
rc, out, err = module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
data = re.sub(' +',' ',out).strip().split(' ')
if len(data) == 1:
hyperthreading = 'OFF'
else:
hyperthreading = data[1]
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
data = out.strip().split(" ")
if hyperthreading == 'ON':
self.facts['processor_cores'] = int(data[0])/2
else:
if len(data) == 1:
self.facts['processor_cores'] = self.facts['processor_count']
else:
self.facts['processor_cores'] = int(data[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
else:
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip().split(" ")[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
def get_memory_facts(self):
pagesize = 4096
rc, out, err = module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
data = int(re.sub(' +',' ',out).split(' ')[5].strip())
self.facts['memfree_mb'] = pagesize * data / 1024 / 1024
if self.facts['architecture'] == '9000/800':
try:
rc, out, err = module.run_command("grep Physical /var/adm/syslog/syslog.log")
data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data) / 1024
except AttributeError:
#For systems where memory details aren't sent to syslog or the log has rotated, use parsed
#adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
if os.access("/dev/kmem", os.R_OK):
rc, out, err = module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
if not err:
data = out
self.facts['memtotal_mb'] = int(data) / 256
else:
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data)
rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f -q")
self.facts['swaptotal_mb'] = int(out.strip())
rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
swap = 0
for line in out.strip().split('\n'):
swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
self.facts['swapfree_mb'] = swap
def get_hw_facts(self):
rc, out, err = module.run_command("model")
self.facts['model'] = out.strip()
if self.facts['architecture'] == 'ia64':
separator = ':'
if self.facts['distribution_version'] == "B.11.23":
separator = '='
rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
self.facts['firmware_version'] = out.split(separator)[1].strip()
class Darwin(Hardware):
"""
Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- processor
- processor_cores
- memtotal_mb
- memfree_mb
- model
- osversion
- osrevision
"""
platform = 'Darwin'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.sysctl = self.get_sysctl()
self.get_mac_facts()
self.get_cpu_facts()
self.get_memory_facts()
return self.facts
def get_sysctl(self):
rc, out, err = module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"])
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
if line.rstrip("\n"):
(key, value) = re.split(' = |: ', line, maxsplit=1)
sysctl[key] = value.strip()
return sysctl
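    # Illustrative sketch (assumed sysctl output, not from the source): the
    # split on ' = ' or ': ' above turns lines such as
    #
    #   hw.ncpu: 8
    #   kern.osversion: 13A603
    #
    # into {'hw.ncpu': '8', 'kern.osversion': '13A603'}.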
def get_system_profile(self):
rc, out, err = module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
if rc != 0:
return dict()
system_profile = dict()
for line in out.splitlines():
if ': ' in line:
(key, value) = line.split(': ', 1)
system_profile[key.strip()] = ' '.join(value.strip().split())
return system_profile
def get_mac_facts(self):
rc, out, err = module.run_command("sysctl hw.model")
if rc == 0:
self.facts['model'] = out.splitlines()[-1].split()[1]
self.facts['osversion'] = self.sysctl['kern.osversion']
self.facts['osrevision'] = self.sysctl['kern.osrevision']
def get_cpu_facts(self):
if 'machdep.cpu.brand_string' in self.sysctl: # Intel
self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
else: # PowerPC
system_profile = self.get_system_profile()
self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']
def get_memory_facts(self):
self.facts['memtotal_mb'] = long(self.sysctl['hw.memsize']) / 1024 / 1024
rc, out, err = module.run_command("sysctl hw.usermem")
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024
class Network(Facts):
"""
This is a generic Network subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you must define:
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
All subclasses MUST define platform.
"""
platform = 'Generic'
IPV6_SCOPE = { '0' : 'global',
'10' : 'host',
'20' : 'link',
'40' : 'admin',
'50' : 'site',
'80' : 'organization' }
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Network.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self, module):
self.module = module
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxNetwork(Network):
"""
This is a Linux-specific subclass of Network. It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- ipv4_address and ipv6_address: the first non-local address for each family.
"""
platform = 'Linux'
def __init__(self, module):
Network.__init__(self, module)
def populate(self):
ip_path = self.module.get_bin_path('ip')
if ip_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, ip_path):
# Use the commands:
# ip -4 route get 8.8.8.8 -> Google public DNS
# ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and self.facts['os_family'] == 'RedHat' \
and self.facts['distribution_version'].startswith('4.'):
continue
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
words = out.split('\n')[0].split()
# A valid output starts with the queried address on the first line
if len(words) > 0 and words[0] == command[v][-1]:
for i in range(len(words) - 1):
if words[i] == 'dev':
interface[v]['interface'] = words[i+1]
elif words[i] == 'src':
interface[v]['address'] = words[i+1]
elif words[i] == 'via' and words[i+1] != command[v][-1]:
interface[v]['gateway'] = words[i+1]
return interface['v4'], interface['v6']
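    # Illustrative sketch (assumed 'ip -4 route get 8.8.8.8' output, not from
    # the source):
    #
    #   8.8.8.8 via 192.168.1.1 dev eth0 src 192.168.1.10
    #
    # would be parsed above into
    #   {'interface': 'eth0', 'address': '192.168.1.10',
    #    'gateway': '192.168.1.1'}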
def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
interfaces = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
for path in glob.glob('/sys/class/net/*'):
if not os.path.isdir(path):
continue
device = os.path.basename(path)
interfaces[device] = { 'device': device }
if os.path.exists(os.path.join(path, 'address')):
macaddress = open(os.path.join(path, 'address')).read().strip()
if macaddress and macaddress != '00:00:00:00:00:00':
interfaces[device]['macaddress'] = macaddress
if os.path.exists(os.path.join(path, 'mtu')):
interfaces[device]['mtu'] = int(open(os.path.join(path, 'mtu')).read().strip())
if os.path.exists(os.path.join(path, 'operstate')):
interfaces[device]['active'] = open(os.path.join(path, 'operstate')).read().strip() != 'down'
# if os.path.exists(os.path.join(path, 'carrier')):
# interfaces[device]['link'] = open(os.path.join(path, 'carrier')).read().strip() == '1'
if os.path.exists(os.path.join(path, 'device','driver', 'module')):
interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
if os.path.exists(os.path.join(path, 'type')):
type = open(os.path.join(path, 'type')).read().strip()
if type == '1':
interfaces[device]['type'] = 'ether'
elif type == '512':
interfaces[device]['type'] = 'ppp'
elif type == '772':
interfaces[device]['type'] = 'loopback'
if os.path.exists(os.path.join(path, 'bridge')):
interfaces[device]['type'] = 'bridge'
interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
interfaces[device]['id'] = open(os.path.join(path, 'bridge', 'bridge_id')).read().strip()
if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
interfaces[device]['stp'] = open(os.path.join(path, 'bridge', 'stp_state')).read().strip() == '1'
if os.path.exists(os.path.join(path, 'bonding')):
interfaces[device]['type'] = 'bonding'
interfaces[device]['slaves'] = open(os.path.join(path, 'bonding', 'slaves')).read().split()
interfaces[device]['mode'] = open(os.path.join(path, 'bonding', 'mode')).read().split()[0]
interfaces[device]['miimon'] = open(os.path.join(path, 'bonding', 'miimon')).read().split()[0]
interfaces[device]['lacp_rate'] = open(os.path.join(path, 'bonding', 'lacp_rate')).read().split()[0]
primary = open(os.path.join(path, 'bonding', 'primary')).read()
if primary:
interfaces[device]['primary'] = primary
                # use a separate name so the sysfs 'path' is not clobbered
                path_all_slaves = os.path.join(path, 'bonding', 'all_slaves_active')
                if os.path.exists(path_all_slaves):
                    interfaces[device]['all_slaves_active'] = open(path_all_slaves).read() == '1'
# Check whether an interface is in promiscuous mode
if os.path.exists(os.path.join(path,'flags')):
promisc_mode = False
# The second byte indicates whether the interface is in promiscuous mode.
# 1 = promisc
# 0 = no promisc
data = int(open(os.path.join(path, 'flags')).read().strip(),16)
promisc_mode = (data & 0x0100 > 0)
interfaces[device]['promisc'] = promisc_mode
def parse_ip_output(output, secondary=False):
for line in output.split('\n'):
if not line:
continue
words = line.split()
if words[0] == 'inet':
if '/' in words[1]:
address, netmask_length = words[1].split('/')
else:
# pointopoint interfaces do not have a prefix
address = words[1]
netmask_length = "32"
address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
iface = words[-1]
if iface != device:
interfaces[iface] = {}
if not secondary and "ipv4" not in interfaces[iface]:
interfaces[iface]['ipv4'] = {'address': address,
'netmask': netmask,
'network': network}
else:
if "ipv4_secondaries" not in interfaces[iface]:
interfaces[iface]["ipv4_secondaries"] = []
interfaces[iface]["ipv4_secondaries"].append({
'address': address,
'netmask': netmask,
'network': network,
})
# add this secondary IP to the main device
if secondary:
if "ipv4_secondaries" not in interfaces[device]:
interfaces[device]["ipv4_secondaries"] = []
interfaces[device]["ipv4_secondaries"].append({
'address': address,
'netmask': netmask,
'network': network,
})
# If this is the default address, update default_ipv4
if 'address' in default_ipv4 and default_ipv4['address'] == address:
default_ipv4['netmask'] = netmask
default_ipv4['network'] = network
default_ipv4['macaddress'] = macaddress
default_ipv4['mtu'] = interfaces[device]['mtu']
default_ipv4['type'] = interfaces[device].get("type", "unknown")
default_ipv4['alias'] = words[-1]
if not address.startswith('127.'):
ips['all_ipv4_addresses'].append(address)
elif words[0] == 'inet6':
address, prefix = words[1].split('/')
scope = words[3]
if 'ipv6' not in interfaces[device]:
interfaces[device]['ipv6'] = []
interfaces[device]['ipv6'].append({
'address' : address,
'prefix' : prefix,
'scope' : scope
})
# If this is the default address, update default_ipv6
if 'address' in default_ipv6 and default_ipv6['address'] == address:
default_ipv6['prefix'] = prefix
default_ipv6['scope'] = scope
default_ipv6['macaddress'] = macaddress
default_ipv6['mtu'] = interfaces[device]['mtu']
default_ipv6['type'] = interfaces[device].get("type", "unknown")
if not address == '::1':
ips['all_ipv6_addresses'].append(address)
ip_path = module.get_bin_path("ip")
args = [ip_path, 'addr', 'show', 'primary', device]
rc, stdout, stderr = self.module.run_command(args)
primary_data = stdout
args = [ip_path, 'addr', 'show', 'secondary', device]
rc, stdout, stderr = self.module.run_command(args)
secondary_data = stdout
parse_ip_output(primary_data)
parse_ip_output(secondary_data, secondary=True)
        # replace ':' with '_' in interface names, since colons are hard to use in templates
new_interfaces = {}
for i in interfaces:
if ':' in i:
new_interfaces[i.replace(':','_')] = interfaces[i]
else:
new_interfaces[i] = interfaces[i]
return new_interfaces, ips
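    # Note (illustration, not from the source): an alias interface reported
    # as 'eth0:1' is exposed under the fact key 'eth0_1' by the rename above.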
class GenericBsdIfconfigNetwork(Network):
"""
This is a generic BSD subclass of Network using the ifconfig command.
It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
It currently does not define
- default_ipv4 and default_ipv6
- type, mtu and network on interfaces
"""
platform = 'Generic_BSD_Ifconfig'
def __init__(self, module):
Network.__init__(self, module)
def populate(self):
ifconfig_path = module.get_bin_path('ifconfig')
if ifconfig_path is None:
return self.facts
route_path = module.get_bin_path('route')
if route_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
interfaces, ips = self.get_interfaces_info(ifconfig_path)
self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, route_path):
# Use the commands:
# route -n get 8.8.8.8 -> Google public DNS
# route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [route_path, '-n', 'get', '8.8.8.8'],
v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
lines = out.split('\n')
for line in lines:
words = line.split()
# Collect output from route command
if len(words) > 1:
if words[0] == 'interface:':
interface[v]['interface'] = words[1]
if words[0] == 'gateway:':
interface[v]['gateway'] = words[1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
# FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
# when running the command 'ifconfig'.
# Solaris must explicitly run the command 'ifconfig -a'.
rc, out, err = module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
if words[0] == 'pass':
continue
elif re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
return interfaces, ips
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
        if len(words) >= 5:  # Newer FreeBSD versions
current_if['metric'] = words[3]
current_if['mtu'] = words[5]
else:
current_if['mtu'] = words[3]
return current_if
def parse_options_line(self, words, current_if, ips):
# Mac has options like this...
current_if['options'] = self.get_options(words[0])
def parse_nd6_line(self, words, current_if, ips):
        # FreeBSD has options like this...
current_if['options'] = self.get_options(words[1])
def parse_ether_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = words[1]
if len(words) > 2:
current_if['media_select'] = words[2]
if len(words) > 3:
current_if['media_type'] = words[3][1:]
if len(words) > 4:
current_if['media_options'] = self.get_options(words[4])
def parse_status_line(self, words, current_if, ips):
current_if['status'] = words[1]
def parse_lladdr_line(self, words, current_if, ips):
current_if['lladdr'] = words[1]
def parse_inet_line(self, words, current_if, ips):
address = {'address': words[1]}
# deal with hex netmask
if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
words[3] = '0x' + words[3]
if words[3].startswith('0x'):
address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
else:
# otherwise assume this is a dotted quad
address['netmask'] = words[3]
# calculate the network
address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
# broadcast may be given or we need to calculate
if len(words) > 5:
address['broadcast'] = words[5]
else:
address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
# add to our list of addresses
if not words[1].startswith('127.'):
ips['all_ipv4_addresses'].append(address['address'])
current_if['ipv4'].append(address)
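    # Worked example (illustrative): for an ifconfig line such as
    #     inet 192.168.1.10 netmask 0xffffff00 broadcast 192.168.1.255
    # words[3] == '0xffffff00' unpacks to netmask '255.255.255.0', giving
    # network '192.168.1.0' and broadcast '192.168.1.255'.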
def parse_inet6_line(self, words, current_if, ips):
address = {'address': words[1]}
if (len(words) >= 4) and (words[2] == 'prefixlen'):
address['prefix'] = words[3]
if (len(words) >= 6) and (words[4] == 'scopeid'):
address['scope'] = words[5]
localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
if address['address'] not in localhost6:
ips['all_ipv6_addresses'].append(address['address'])
current_if['ipv6'].append(address)
def parse_unknown_line(self, words, current_if, ips):
# we are going to ignore unknown lines here - this may be
# a bad idea - but you can override it in your subclass
pass
def get_options(self, option_string):
start = option_string.find('<') + 1
end = option_string.rfind('>')
if (start > 0) and (end > 0) and (end > start + 1):
option_csv = option_string[start:end]
return option_csv.split(',')
else:
return []
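    # Example (illustrative): get_options('flags=8843<UP,BROADCAST,RUNNING,MULTICAST>')
    # returns ['UP', 'BROADCAST', 'RUNNING', 'MULTICAST'].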
def merge_default_interface(self, defaults, interfaces, ip_type):
        if 'interface' not in defaults:
            return
        if defaults['interface'] not in interfaces:
            return
ifinfo = interfaces[defaults['interface']]
# copy all the interface values across except addresses
for item in ifinfo.keys():
if item != 'ipv4' and item != 'ipv6':
defaults[item] = ifinfo[item]
if len(ifinfo[ip_type]) > 0:
for item in ifinfo[ip_type][0].keys():
defaults[item] = ifinfo[ip_type][0][item]
class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the Mac OS X/Darwin Network Class.
It uses the GenericBsdIfconfigNetwork unchanged
"""
platform = 'Darwin'
# media line is different to the default FreeBSD one
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
current_if['media_type'] = words[2][1:]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
class FreeBSDNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the FreeBSD Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'FreeBSD'
class AIXNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the AIX Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'AIX'
# AIX 'ifconfig -a' does not have three words in the interface line
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
# only this condition differs from GenericBsdIfconfigNetwork
if re.match('^\w*\d*:', line):
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
return interfaces, ips
# AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the OpenBSD Network Class.
It uses the GenericBsdIfconfigNetwork.
"""
platform = 'OpenBSD'
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
class SunOSNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the SunOS Network Class.
It uses the GenericBsdIfconfigNetwork.
Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
"""
platform = 'SunOS'
# Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
# MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
# 'parse_interface_line()' checks for previously seen interfaces before defining
# 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
if re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words, current_if, interfaces)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
# 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
# ipv4/ipv6 lists which is ugly and hard to read.
# This quick hack merges the dictionaries. Purely cosmetic.
for iface in interfaces:
for v in 'ipv4', 'ipv6':
combined_facts = {}
for facts in interfaces[iface][v]:
combined_facts.update(facts)
if len(combined_facts.keys()) > 0:
interfaces[iface][v] = [combined_facts]
return interfaces, ips
def parse_interface_line(self, words, current_if, interfaces):
device = words[0][0:-1]
if device not in interfaces.keys():
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
else:
current_if = interfaces[device]
flags = self.get_options(words[1])
v = 'ipv4'
if 'IPv6' in flags:
v = 'ipv6'
current_if[v].append({'flags': flags, 'mtu': words[3]})
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
# Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
# Add leading zero to each octet where needed.
def parse_ether_line(self, words, current_if, ips):
macaddress = ''
for octet in words[1].split(':'):
octet = ('0' + octet)[-2:None]
macaddress += (octet + ':')
current_if['macaddress'] = macaddress[0:-1]
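    # Example (illustrative): '0:3:ba:2f:c4:1d' becomes '00:03:ba:2f:c4:1d'.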
class Virtual(Facts):
"""
This is a generic Virtual subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you should define:
- virtualization_type
- virtualization_role
- container (e.g. solaris zones, freebsd jails, linux containers)
All subclasses MUST define platform.
"""
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Virtual.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self):
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxVirtual(Virtual):
"""
This is a Linux-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'Linux'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
if os.path.exists("/proc/xen"):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
try:
for line in open('/proc/xen/capabilities'):
if "control_d" in line:
self.facts['virtualization_role'] = 'host'
except IOError:
pass
return
if os.path.exists('/proc/vz'):
self.facts['virtualization_type'] = 'openvz'
if os.path.exists('/proc/bc'):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/1/cgroup'):
for line in open('/proc/1/cgroup').readlines():
if re.search('/docker/', line):
self.facts['virtualization_type'] = 'docker'
self.facts['virtualization_role'] = 'guest'
return
if re.search('/lxc/', line):
self.facts['virtualization_type'] = 'lxc'
self.facts['virtualization_role'] = 'guest'
return
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
if product_name in ['KVM', 'Bochs']:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'RHEV Hypervisor':
self.facts['virtualization_type'] = 'RHEV'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'VMware Virtual Platform':
self.facts['virtualization_type'] = 'VMware'
self.facts['virtualization_role'] = 'guest'
return
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
return
if bios_vendor == 'innotek GmbH':
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
return
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
        # FIXME: this also matches Hyper-V
if sys_vendor == 'Microsoft Corporation':
self.facts['virtualization_type'] = 'VirtualPC'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'Parallels Software International Inc.':
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'QEMU':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/self/status'):
for line in open('/proc/self/status').readlines():
if re.match('^VxID: \d+', line):
self.facts['virtualization_type'] = 'linux_vserver'
if re.match('^VxID: 0', line):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/cpuinfo'):
for line in open('/proc/cpuinfo').readlines():
if re.match('^model name.*QEMU Virtual CPU', line):
self.facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^vendor_id.*PowerVM Lx86', line):
self.facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
self.facts['virtualization_type'] = 'PR/SM'
lscpu = module.get_bin_path('lscpu')
if lscpu:
rc, out, err = module.run_command(["lscpu"])
if rc == 0:
for line in out.split("\n"):
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
self.facts['virtualization_type'] = data[1].strip()
else:
self.facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if self.facts['virtualization_type'] == 'PR/SM':
self.facts['virtualization_role'] = 'LPAR'
else:
self.facts['virtualization_role'] = 'guest'
return
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
modules = []
for line in open("/proc/modules").readlines():
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'host'
return
if 'vboxdrv' in modules:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'host'
return
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
self.facts['virtualization_type'] = 'NA'
self.facts['virtualization_role'] = 'NA'
return
class HPUXVirtual(Virtual):
"""
This is a HP-UX specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'HP-UX'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
if os.path.exists('/usr/sbin/vecheck'):
rc, out, err = module.run_command("/usr/sbin/vecheck")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP vPar'
if os.path.exists('/opt/hpvm/bin/hpvminfo'):
rc, out, err = module.run_command("/opt/hpvm/bin/hpvminfo")
if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM vPar'
elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM IVM'
elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
self.facts['virtualization_type'] = 'host'
self.facts['virtualization_role'] = 'HPVM'
if os.path.exists('/usr/sbin/parstatus'):
rc, out, err = module.run_command("/usr/sbin/parstatus")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP nPar'
class SunOSVirtual(Virtual):
"""
This is a SunOS-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
- container
"""
platform = 'SunOS'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
rc, out, err = module.run_command("/usr/sbin/prtdiag")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'Parallels' in line:
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
if 'HVM domU' in line:
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
# Check if it's a zone
if os.path.exists("/usr/bin/zonename"):
rc, out, err = module.run_command("/usr/bin/zonename")
if out.rstrip() != "global":
self.facts['container'] = 'zone'
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if os.path.isdir('/.SUNWnative'):
self.facts['container'] = 'zone'
# If it's a zone check if we can detect if our global zone is itself virtualized.
# Relies on the "guest tools" (e.g. vmware tools) to be installed
if 'container' in self.facts and self.facts['container'] == 'zone':
rc, out, err = module.run_command("/usr/sbin/modinfo")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
def get_file_content(path, default=None):
data = default
if os.path.exists(path) and os.access(path, os.R_OK):
data = open(path).read().strip()
if len(data) == 0:
data = default
return data
def ansible_facts(module):
facts = {}
facts.update(Facts().populate())
facts.update(Hardware().populate())
facts.update(Network(module).populate())
facts.update(Virtual().populate())
return facts
# ===========================================
def get_all_facts(module):
setup_options = dict(module_setup=True)
facts = ansible_facts(module)
for (k, v) in facts.items():
setup_options["ansible_%s" % k.replace('-', '_')] = v
    # Look for the paths to the facter and ohai binaries and set
    # the variables to those paths.
facter_path = module.get_bin_path('facter')
ohai_path = module.get_bin_path('ohai')
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
if facter_path is not None:
rc, out, err = module.run_command(facter_path + " --json")
facter = True
try:
facter_ds = json.loads(out)
except:
facter = False
if facter:
for (k,v) in facter_ds.items():
setup_options["facter_%s" % k] = v
# ditto for ohai
if ohai_path is not None:
rc, out, err = module.run_command(ohai_path)
ohai = True
try:
ohai_ds = json.loads(out)
except:
ohai = False
if ohai:
for (k,v) in ohai_ds.items():
k2 = "ohai_%s" % k.replace('-', '_')
setup_options[k2] = v
setup_result = { 'ansible_facts': {} }
for (k,v) in setup_options.items():
if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
setup_result['ansible_facts'][k] = v
# hack to keep --verbose from showing all the setup module results
setup_result['verbose_override'] = True
return setup_result
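
# Illustrative composition (not executable standalone): the fact-gathering
# classes above are driven by an AnsibleModule instance bound to the global
# `module`, which the Ansible runtime injects into this file. For example:
#     facts = ansible_facts(module)    # Facts + Hardware + Network + Virtual
#     result = get_all_facts(module)   # adds facter/ohai data and applies the filter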
| gpl-3.0 | 2,309,983,008,177,889,300 | 41.27381 | 165 | 0.506271 | false |
XiaoxiaoLiu/morphology_analysis | bigneuron/median_compare_to_consensus.py | 1 | 13289 | __author__ = 'xiaoxiaol'
import sys
import os
import platform
import matplotlib.pyplot as plt
import seaborn as sb
if (platform.system() == "Linux"):
WORK_PATH = "/local1/xiaoxiaol/work"
else:
WORK_PATH = "/Users/xiaoxiaoliu/work"
p = WORK_PATH + '/src/morphology_analysis'
sys.path.append(p)
import bigneuron.recon_prescreening as rp
import pandas as pd
import numpy as np
import time
def calculate_average_all_pair_distance(csv_file, hasConsensus=True):
#df_in is the output csv from median_swc plugin
#it contains unique pair-wise distances
    #the consensus is usually the last of the median_swc() inputs, so it won't show up in the "swc_file_name1" column
#output the average distances array
#remove invalid results
df_out=pd.DataFrame()
if not os.path.exists(csv_file):
return df_out
df_f = pd.read_csv(csv_file)
if df_f.empty:
return df_out
df_in = df_f[df_f['sum_distance'] >0]
df_out = pd.DataFrame(columns = ['swc_file_name','average_sum_distance','average_structure_difference','average_max_distance'])
dfg1 = df_in.groupby('swc_file_name1')
dfg2 = df_in.groupby('swc_file_name2')
swc_names = pd.unique(df_in['swc_file_name1'])
swc_names_2 = pd.unique(df_in['swc_file_name2'])
consensus_file_name = df_in['swc_file_name2'].tail(1).values[0]
if 'consensus' not in consensus_file_name:
#print "missing consensus"
return df_out
row = 0
for swc_name in swc_names:
a = dfg1.get_group(swc_name)
a = a[a['swc_file_name2']!=consensus_file_name]
b = pd.DataFrame(columns = ['swc_file_name1','swc_file_name2','sum_distance','structure_difference','max_distance']) #empty
if swc_name in swc_names_2:
b = dfg2.get_group(swc_name)
num_of_swcs = len(a) +len(b)
df_out.loc[row,'swc_file_name']= swc_name.split('/')[-1]
        df_out.loc[row,'average_sum_distance'] = (a['sum_distance'].sum() + b['sum_distance'].sum()) / num_of_swcs
        df_out.loc[row,'average_structure_difference'] = (a['structure_difference'].sum() + b['structure_difference'].sum()) / num_of_swcs
        df_out.loc[row,'average_max_distance'] = (a['max_distance'].sum() + b['max_distance'].sum()) / num_of_swcs
row = row +1
df_out.loc[row,'swc_file_name']= consensus_file_name.split('/')[-1]
consensus_group = dfg2.get_group(consensus_file_name)
df_out.loc[row,'average_sum_distance'] = consensus_group['sum_distance'].sum() / (num_of_swcs+1)
df_out.loc[row,'average_structure_difference'] = consensus_group['structure_difference'].sum() / (num_of_swcs+1)
df_out.loc[row,'average_max_distance'] = consensus_group['max_distance'].sum() / (num_of_swcs+1)
return df_out
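
# Illustrative call (hypothetical path): the input CSV is the pairwise
# distance matrix written by the median_swc plugin, with columns
# swc_file_name1, swc_file_name2, sum_distance, structure_difference
# and max_distance.
#     df = calculate_average_all_pair_distance('/data/img01_median_distances.csv')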
def plot_compare_median_consensus(output_dir, df_order, metric, type = 'ts',DISPLAY = 0):
plt.figure()
if type =='ts':
#sb.tsplot(data=df_order, value=metric,time='order',unit="algorithm",condition="algorithm",err_style="unit_traces")
ax = sb.boxplot(x=metric, y="algorithm", data=df_order,
whis=np.inf, color="c")
# Add in points to show each observation
sb.stripplot(x=metric, y="algorithm", data=df_order,
jitter=True, size=3, color=".3", linewidth=0)
ax.set_xscale("log")
sb.despine(trim=True)
# plt.xlabel('images sorted by the average neuron distance of the median reconstruction')
plt.savefig(output_dir + '/ts_compare_median_with_consensus_'+metric+'.png', format='png')
if type =='lm':
sb.lmplot(x="order", y=metric, hue="algorithm", data=df_order)
plt.xlabel('images sorted by the average neuron distance of the median reconstruction')
plt.savefig(output_dir + '/lm_compare_median_with_consensus_'+metric+'.lm.png', format='png')
if DISPLAY:
plt.show()
plt.close()
###################################################################################################################################################
def pipe(input_data_dir, output_dir, imageIDs, distance_file_postfix='median_distances.csv',COLLECT_FROM_DISTANCE_MATRIX=1,EXTRACT_MEDIAN_CONSENSUS=1, DISPLAY=0):
BeginTime = time.time()
all_average_csv = output_dir+'/all_averaged_distances.csv'
######################################
if COLLECT_FROM_DISTANCE_MATRIX:
#remove empty files
os.system('find '+ input_data_dir +' -size 0 -delete')
os.system('mkdir '+ output_dir)
else:
print "Distance csv files had been collected. Skip collecting."
if COLLECT_FROM_DISTANCE_MATRIX:
df_all = pd.DataFrame(columns=['image_id', 'algorithm','swc_file_name','average_sum_distance','average_structure_difference','average_max_distance'])
count = 0
for image_id in imageIDs:
df_image_filled_template = pd.DataFrame(columns = df_all.columns)
df_image_filled_template['image_id'] = image_id
csv_file = input_data_dir +'/'+image_id+'_'+distance_file_postfix
if not os.path.exists(csv_file): # for gold 163, consensus results are stored in individual image folders
csv_file = input_data_dir +'/'+image_id+'/'+distance_file_postfix
if not os.path.exists(csv_file):
print "missing "+csv_file
continue
print "read "+csv_file
df_ff = calculate_average_all_pair_distance(csv_file, hasConsensus = True)
if not df_ff.empty:
for i in range(df_ff.shape[0]):
if isinstance(df_ff.iloc[i].swc_file_name, basestring) :
alg = rp.matchFileToAlgorithmName((df_ff.iloc[i].swc_file_name).split('/')[-1])
else:
print "nan input swc_file_name"
print df_ff.iloc[i].swc_file_name
continue
df_image_filled_template.loc[count] = [image_id, alg,df_ff.iloc[i]['swc_file_name'],df_ff.iloc[i]['average_sum_distance'],df_ff.iloc[i]['average_structure_difference'],
df_ff.iloc[i]['average_max_distance']]
count = count +1
else:
print "empty df_ff:"+csv_file
df_all = df_all.append(df_image_filled_template,ignore_index=True)
df_all.to_csv(all_average_csv, index=False)
print "Done collecting median distances"
print "Output:" + all_average_csv
#####################################################################
PLOT_algorithm_consensus = 1
metric = 'average_sum_distance'
if PLOT_algorithm_consensus:
df_all = pd.read_csv(all_average_csv)
all_algorithms = np.unique(df_all.algorithm)
plt.figure()
sb.set_context("talk", font_scale=0.7)
dfg = df_all.groupby('algorithm')
sample_size_per_algorithm = np.zeros(all_algorithms.size)
jj = 0
for alg in all_algorithms:
df_a = dfg.get_group(alg)
df_a = df_a[df_a[metric]>=0]
sample_size_per_algorithm[jj] = df_a.shape[0]
jj = jj+1
order = sample_size_per_algorithm.argsort()
algorithms_ordered = all_algorithms[order[::-1]]
sample_size_per_algorithm =sample_size_per_algorithm[order[::-1]]
a = sb.barplot(y='algorithm', x=metric, data=df_all, order = algorithms_ordered)
#a = sb.tsplot(data=df_all, time='image_id', value='total_average_distance')
algorithm_names = [rp.map_better_algorithm_name(x) for x in algorithms_ordered]
a.set_yticklabels(['%s ($n$=%d )'%(algorithm_names[i], sample_size_per_algorithm[i]) for i in range(algorithms_ordered.size) ])
plt.subplots_adjust(left=0.4, bottom=0.1, top=0.9)
plt.savefig(output_dir + '/compare_distance_plot.png', format='png')
if DISPLAY:
plt.show()
plt.close()
print "Done plotting algorithm comparison."
#####################################################
metric = 'average_sum_distance'
output_median_and_consensus_csv_file = output_dir +'/extracted_median_consensus.csv'
if not EXTRACT_MEDIAN_CONSENSUS :
print "Median and consensus swc info had been collected. Skip this step."
if EXTRACT_MEDIAN_CONSENSUS:
df_all = pd.read_csv(all_average_csv)
print df_all.shape
dfg = df_all.groupby('image_id')
df_median_and_consensus = pd.DataFrame(columns=['image_id', 'algorithm','swc_file_name','average_sum_distance','average_structure_difference','average_max_distance'])
PLOT_imageIDs = pd.unique(df_all['image_id'])
count = 0
for image_id in PLOT_imageIDs:
#print "image_id: "+ str( image_id)
df_image = dfg.get_group(image_id)
#drop nans
            df_image.dropna(axis=0, how="any", inplace=True)
if len(df_image) <1:
print "no valid recons found for :" + image_id
continue
i = 0
for fn in df_image['swc_file_name']:
if 'consensus' in fn:
break
i= i+1
if i>= len(df_image):
                print 'wrong consensus id found: ' + str(i) + ' image id: ' + image_id
continue
df_median_and_consensus.loc[count] =[image_id, 'consensus',df_image.iloc[i]['swc_file_name'],df_image.iloc[i]['average_sum_distance'],
df_image.iloc[i]['average_structure_difference'],df_image.iloc[i]['average_max_distance']]
count = count +1
            df_image.drop(df_image.index[[i]], axis=0, inplace=True)
            df_image.sort(columns=['average_sum_distance', 'average_structure_difference', 'average_max_distance'], ascending=True, inplace=True)
df_median_and_consensus.loc[count] =[image_id, 'median',df_image.iloc[0]['swc_file_name'],df_image.iloc[0]['average_sum_distance'],
df_image.iloc[0]['average_structure_difference'],df_image.iloc[0]['average_max_distance']]
count = count +1
df_median_and_consensus.to_csv(output_median_and_consensus_csv_file)
print "Done extracting median distances"
print "Output median and consensus distances for each image:"+output_median_and_consensus_csv_file
PLOT_MEDIAN_CONSENSUS = 1
if PLOT_MEDIAN_CONSENSUS:
        # reorder by distance
df_median_and_consensus = pd.read_csv(output_median_and_consensus_csv_file)
dfg = df_median_and_consensus.groupby('algorithm')
df_consensus = dfg.get_group('consensus')
df_median = dfg.get_group('median')
df_median.reset_index(inplace=True)
df_consensus.reset_index(inplace=True)
#sort by average distance
df_median.sort(columns=['average_sum_distance'], inplace=True)
df_median['order'] = range(0,len(df_median))
df_consensus = df_consensus.iloc[df_median.index]
df_consensus['order'] = range(0,len(df_median))
df_diff=pd.DataFrame(columns=['image_id','difference'])
df_diff['difference'] = df_median['average_sum_distance'] -df_consensus['average_sum_distance']
df_diff['image_id'] = df_median['image_id']
df_ff = df_diff['difference']
print "median - consensus:"
max_idx = np.nanargmin(np.abs(df_ff-df_ff.max()))
print "max value: %f, image : %s" % (df_ff.max(), df_median.iloc[max_idx]['image_id'])
min_idx = np.nanargmin(np.abs(df_ff-df_ff.min()))
print "min value: %f, image id: %s" % (df_ff.min(),df_median.iloc[min_idx]['image_id'])
median_idx = np.nanargmin(np.abs(df_ff-df_ff.median()))
print "median value: %f, image : %s" % (df_ff.median(),df_median.iloc[median_idx]['image_id'])
df_ff_big = df_diff[df_diff['difference']>0]
print "consensus is closer to each reconstructions than the median reconstructions in %.2f percent of the %d total images" %( 100*float(len(df_ff_big))/len(df_diff), len(df_diff))
df_ff_small = df_diff[df_diff['difference']<0]
df_ff_small.to_csv(output_dir+'/investigate.csv')
print "investigate the following cases list in:"+ output_dir+'/investigate.csv'
#make sure the image_ids are matching
for i in range(0,len(df_median)):
if df_consensus.iloc[i]['image_id'] != df_median.iloc[i]['image_id']:
print "error matching"
print df_consensus.iloc[i]['image_id']
print df_median.iloc[i]['image_id']
                exit()
frames=[df_consensus,df_median]
df_order = pd.concat(frames)
for type in ['ts']:
plot_compare_median_consensus(output_dir,df_order, 'average_sum_distance',type, DISPLAY)
plot_compare_median_consensus(output_dir,df_order, 'average_max_distance',type, DISPLAY)
plot_compare_median_consensus(output_dir,df_order, 'average_structure_difference',type, DISPLAY)
endtime = time.time()
dur=(endtime-BeginTime)/60
print "All takes %d min" %(dur)
| gpl-3.0 | 7,909,675,514,826,220,000 | 37.29683 | 188 | 0.591843 | false |
mfellner/maximilianfellner.eu | app/shared/models/__init__.py | 1 | 2730 | # -*- coding: utf-8 -*-
from abc import abstractmethod, ABCMeta
from flask.json import jsonify
from flask.ext.sqlalchemy import SQLAlchemy
import serialize
db = SQLAlchemy()
class JSendResponse(serialize.SerializableMixin):
"""Base class for restful JSON responses according to the JSend specification (http://labs.omniti.com/labs/jsend).
:param status: either 'success', 'fail' or 'error'.
:type status: str
"""
__metaclass__ = ABCMeta
def __init__(self, status):
self.status = status
@staticmethod
def new_success(data=None):
"""Create a new 'success' response.
:param data: optional data of the response.
:type data: object
:returns: DataResponse with status 'success'.
"""
return DataResponse('success', data)
@staticmethod
def new_fail(data):
"""Create a new 'fail' response.
:param data: object explaining the failure.
:type data: object
:returns: DataResponse with status 'fail'.
"""
return DataResponse('fail', data)
@staticmethod
def new_error(message):
"""Create a new 'error' response.
:param message: message explaining the error.
:type message: str
:returns: MessageResponse with status 'error'.
"""
return MessageResponse('error', message)
@abstractmethod
def serializable_fields(self, **kwargs):
return {}
def jsonify(self, **kwargs):
return jsonify(self.serialize(**kwargs))
class DataResponse(JSendResponse):
"""Response with a status and optional data.
:param status: either 'success' or 'fail'
:type status: str
:param data: optional data of the response. Data which needs to be formatted must implement Serializable.
"""
def __init__(self, status, data=None):
self.data = data
super(DataResponse, self).__init__(status)
def serializable_fields(self, **kwargs):
if isinstance(self.data, (serialize.SerializableMixin, list)):
return {'status': serialize.String,
'data': serialize.Nested}
else:
return {'status': serialize.String,
'data': serialize.Raw}
class MessageResponse(JSendResponse):
"""Response with a status and message.
:param status: usually 'error'
:type status: str
:param message: description of the error.
:type message: str
"""
def __init__(self, status, message):
self.message = message
super(MessageResponse, self).__init__(status)
def serializable_fields(self, **kwargs):
return {'status': serialize.String,
'message': serialize.String}
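
# Usage sketch (illustrative; jsonify() assumes an active Flask app context):
#     JSendResponse.new_success({'id': 42}).jsonify()
#     JSendResponse.new_fail({'title': 'field is required'}).jsonify()
#     JSendResponse.new_error('database unavailable').jsonify()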
| bsd-3-clause | -1,181,844,369,541,499,600 | 25.25 | 118 | 0.624908 | false |
nextgis-extra/tests | lib_gdal/gdrivers/ers.py | 1 | 11795 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: ers.py 33129 2016-01-23 21:10:49Z rouault $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test ERS format driver.
# Author: Frank Warmerdam <[email protected]>
#
###############################################################################
# Copyright (c) 2007, Frank Warmerdam <[email protected]>
# Copyright (c) 2011-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
from osgeo import gdal
from osgeo import osr
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Perform simple read test.
def ers_1():
tst = gdaltest.GDALTest( 'ERS', 'srtm.ers', 1, 64074 )
return tst.testOpen()
###############################################################################
# Create simple copy and check.
def ers_2():
tst = gdaltest.GDALTest( 'ERS', 'float32.bil', 1, 27 )
return tst.testCreateCopy( new_filename = 'tmp/float32.ers',
check_gt = 1, vsimem = 1 )
###############################################################################
# Test multi-band file.
def ers_3():
tst = gdaltest.GDALTest( 'ERS', 'rgbsmall.tif', 2, 21053 )
return tst.testCreate( new_filename = 'tmp/rgbsmall.ers' )
###############################################################################
# Test HeaderOffset case.
def ers_4():
gt = (143.59625, 0.025, 0.0, -39.38125, 0.0, -0.025)
srs = """GEOGCS["GEOCENTRIC DATUM of AUSTRALIA",
DATUM["GDA94",
SPHEROID["GRS80",6378137,298.257222101]],
PRIMEM["Greenwich",0],
UNIT["degree",0.0174532925199433]]"""
tst = gdaltest.GDALTest( 'ERS', 'ers_dem.ers', 1, 56588 )
return tst.testOpen( check_prj = srs, check_gt = gt )
###############################################################################
# Confirm we can recognise signed 8bit data.
def ers_5():
ds = gdal.Open( 'data/8s.ers' )
md = ds.GetRasterBand(1).GetMetadata('IMAGE_STRUCTURE')
if md['PIXELTYPE'] != 'SIGNEDBYTE':
gdaltest.post_reason( 'Failed to detect SIGNEDBYTE' )
return 'fail'
ds = None
return 'success'
###############################################################################
# Confirm a copy preserves the signed byte info.
def ers_6():
drv = gdal.GetDriverByName( 'ERS' )
src_ds = gdal.Open( 'data/8s.ers' )
ds = drv.CreateCopy( 'tmp/8s.ers', src_ds )
md = ds.GetRasterBand(1).GetMetadata('IMAGE_STRUCTURE')
if md['PIXELTYPE'] != 'SIGNEDBYTE':
gdaltest.post_reason( 'Failed to detect SIGNEDBYTE' )
return 'fail'
ds = None
drv.Delete( 'tmp/8s.ers' )
return 'success'
###############################################################################
# Test opening a file with everything in lower case.
def ers_7():
ds = gdal.Open( 'data/caseinsensitive.ers' )
desc = ds.GetRasterBand(1).GetDescription()
if desc != 'RTP 1st Vertical Derivative':
print(desc)
gdaltest.post_reason( 'did not get expected values.' )
return 'fail'
return 'success'
###############################################################################
# Test GCP support
def ers_8():
src_ds = gdal.Open('../gcore/data/gcps.vrt')
drv = gdal.GetDriverByName( 'ERS' )
ds = drv.CreateCopy('/vsimem/ers_8.ers', src_ds)
ds = None
gdal.Unlink('/vsimem/ers_8.ers.aux.xml')
ds = gdal.Open('/vsimem/ers_8.ers')
expected_gcps = src_ds.GetGCPs()
gcps = ds.GetGCPs()
gcp_count = ds.GetGCPCount()
wkt = ds.GetGCPProjection()
ds = None
if wkt != """PROJCS["NUTM11",GEOGCS["NAD27",DATUM["North_American_Datum_1927",SPHEROID["Clarke 1866",6378206.4,294.978698213898,AUTHORITY["EPSG","7008"]],TOWGS84[-3,142,183,0,0,0,0],AUTHORITY["EPSG","6267"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9108"]],AXIS["Lat",NORTH],AXIS["Long",EAST],AUTHORITY["EPSG","4267"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-117],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["Meter",1]]""":
gdaltest.post_reason('did not get expected GCP projection')
print(wkt)
return 'fail'
if len(gcps) != len(expected_gcps) or len(gcps) != gcp_count:
gdaltest.post_reason('did not get expected GCP number')
return 'fail'
for i in range(len(gcps)):
if abs(gcps[i].GCPPixel - expected_gcps[i].GCPPixel) > 1e-6 or \
abs(gcps[i].GCPLine - expected_gcps[i].GCPLine) > 1e-6 or \
abs(gcps[i].GCPX - expected_gcps[i].GCPX) > 1e-6 or \
abs(gcps[i].GCPY - expected_gcps[i].GCPY) > 1e-6:
gdaltest.post_reason('did not get expected GCP %d' % i)
print(gcps[i])
return 'fail'
drv.Delete('/vsimem/ers_8.ers')
return 'success'
###############################################################################
# Test NoData support (#4207)
def ers_9():
drv = gdal.GetDriverByName( 'ERS' )
ds = drv.Create('/vsimem/ers_9.ers', 1, 1)
ds.GetRasterBand(1).SetNoDataValue(123)
ds = None
f = gdal.VSIFOpenL('/vsimem/ers_9.ers.aux.xml', 'rb')
if f is not None:
gdaltest.post_reason('/vsimem/ers_9.ers.aux.xml should not exist')
gdal.VSIFCloseL(f)
drv.Delete('/vsimem/ers_9.ers')
return 'fail'
ds = gdal.Open('/vsimem/ers_9.ers')
val = ds.GetRasterBand(1).GetNoDataValue()
ds = None
drv.Delete('/vsimem/ers_9.ers')
if val != 123:
gdaltest.post_reason('did not get expected nodata value')
print(val)
return 'fail'
return 'success'
###############################################################################
# Test PROJ, DATUM, UNITS support (#4229)
def ers_10():
drv = gdal.GetDriverByName( 'ERS' )
ds = drv.Create('/vsimem/ers_10.ers', 1, 1, options = ['DATUM=GDA94', 'PROJ=MGA55', 'UNITS=METERS'])
proj = ds.GetMetadataItem("PROJ", "ERS")
datum = ds.GetMetadataItem("DATUM", "ERS")
units = ds.GetMetadataItem("UNITS", "ERS")
if proj != 'MGA55':
gdaltest.post_reason('did not get expected PROJ')
print(proj)
return 'fail'
if datum != 'GDA94':
gdaltest.post_reason('did not get expected DATUM')
print(datum)
return 'fail'
if units != 'METERS':
gdaltest.post_reason('did not get expected UNITS')
print(units)
return 'fail'
# This should be overridden by the above values
sr = osr.SpatialReference()
sr.ImportFromEPSG(4326)
ds.SetProjection(sr.ExportToWkt())
proj = ds.GetMetadataItem("PROJ", "ERS")
datum = ds.GetMetadataItem("DATUM", "ERS")
units = ds.GetMetadataItem("UNITS", "ERS")
if proj != 'MGA55':
gdaltest.post_reason('did not get expected PROJ')
print(proj)
return 'fail'
if datum != 'GDA94':
gdaltest.post_reason('did not get expected DATUM')
print(datum)
return 'fail'
if units != 'METERS':
gdaltest.post_reason('did not get expected UNITS')
print(units)
return 'fail'
ds = None
f = gdal.VSIFOpenL('/vsimem/ers_10.ers.aux.xml', 'rb')
if f is not None:
gdaltest.post_reason('/vsimem/ers_10.ers.aux.xml should not exist')
gdal.VSIFCloseL(f)
drv.Delete('/vsimem/ers_10.ers')
return 'fail'
ds = gdal.Open('/vsimem/ers_10.ers')
wkt = ds.GetProjectionRef()
proj = ds.GetMetadataItem("PROJ", "ERS")
datum = ds.GetMetadataItem("DATUM", "ERS")
units = ds.GetMetadataItem("UNITS", "ERS")
md_ers = ds.GetMetadata("ERS")
ds = None
drv.Delete('/vsimem/ers_10.ers')
if proj != 'MGA55':
gdaltest.post_reason('did not get expected PROJ')
print(proj)
return 'fail'
if datum != 'GDA94':
gdaltest.post_reason('did not get expected DATUM')
print(datum)
return 'fail'
if units != 'METERS':
gdaltest.post_reason('did not get expected UNITS')
print(units)
return 'fail'
if md_ers["PROJ"] != proj or md_ers["DATUM"] != datum or md_ers["UNITS"] != units:
gdaltest.post_reason('GetMetadata() not consistent with '
'GetMetadataItem()')
print(md_ers)
return 'fail'
if wkt.find("""PROJCS["MGA55""") != 0:
gdaltest.post_reason('did not get expected projection')
print(wkt)
return 'fail'
ds = drv.Create('/vsimem/ers_10.ers', 1, 1, options = ['DATUM=GDA94', 'PROJ=MGA55', 'UNITS=FEET'])
ds = None
# Check that we can update those values with SetProjection()
ds = gdal.Open('/vsimem/ers_10.ers', gdal.GA_Update)
sr = osr.SpatialReference()
sr.ImportFromEPSG(4326)
ds.SetProjection(sr.ExportToWkt())
proj = ds.GetMetadataItem("PROJ", "ERS")
datum = ds.GetMetadataItem("DATUM", "ERS")
units = ds.GetMetadataItem("UNITS", "ERS")
if proj != 'GEODETIC':
gdaltest.post_reason('did not get expected PROJ')
print(proj)
return 'fail'
if datum != 'WGS84':
gdaltest.post_reason('did not get expected DATUM')
print(datum)
return 'fail'
if units != 'METERS':
gdaltest.post_reason('did not get expected UNITS')
print(units)
return 'fail'
ds = None
ds = gdal.Open('/vsimem/ers_10.ers')
proj = ds.GetMetadataItem("PROJ", "ERS")
datum = ds.GetMetadataItem("DATUM", "ERS")
units = ds.GetMetadataItem("UNITS", "ERS")
ds = None
drv.Delete('/vsimem/ers_10.ers')
if proj != 'GEODETIC':
gdaltest.post_reason('did not get expected PROJ')
print(proj)
return 'fail'
if datum != 'WGS84':
gdaltest.post_reason('did not get expected DATUM')
print(datum)
return 'fail'
if units != 'METERS':
gdaltest.post_reason('did not get expected UNITS')
print(units)
return 'fail'
return 'success'
###############################################################################
# Cleanup
def ers_cleanup():
gdaltest.clean_tmp()
return 'success'
gdaltest_list = [
ers_1,
ers_2,
ers_3,
ers_4,
ers_5,
ers_6,
ers_7,
ers_8,
ers_9,
ers_10,
ers_cleanup
]
if __name__ == '__main__':
gdaltest.setup_run( 'ers' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
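
# This suite is normally executed through the GDAL autotest harness, e.g.
# by running `python ers.py` from the autotest gdrivers directory.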
| gpl-2.0 | -2,559,820,417,308,844,000 | 29.796345 | 600 | 0.56295 | false |
ddy88958620/lib | Python/scrapy/scentiments/fragrancenet.py | 2 | 3201 | import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from scrapy import log
class FragrancenetSpider(BaseSpider):
name = 'fragrancenet.com'
allowed_domains = ['fragrancenet.com']
start_urls = ('http://www.fragrancenet.com/f/net/view_all.html?locale=en_US',)
def parse(self, response):
hxs = HtmlXPathSelector(response)
brands = hxs.select('//div[@class="descolumn"]//a/@href').extract()
for brand in brands:
yield Request(brand)
next_page = hxs.select('//a[@class="next active"]/@href').extract()
if next_page:
yield Request(next_page[0])
for product in self.parse_products(hxs, response):
yield product
def parse_products(self, hxs, response):
products = hxs.select('//div[@class="productList clear"]//div[starts-with(@class, "promoCell")]')
for p in products:
loader = ProductLoader(item=Product(), selector=p)
name = p.select('.//p[@class="para1"]//text()').extract()
name = ' '.join([n.strip() for n in name])
name = re.sub(' +', ' ', name)
loader.add_xpath('url', './/a[starts-with(@class, "border")]/@href')
loader.add_value('name', name)
loader.add_xpath('sku', './/p[@class="border"]/text()', re='Item: (.*)')
loader.add_xpath('price', './/p[@class="para3"]/text()', re='Our Price: (.*)')
if not loader.get_output_value('price'):
yield Request(loader.get_output_value('url'), callback=self.parse_products2)
continue
if not p.select('.//p[@class="para3"]/text()').re('Our Price: (.*)')[0].startswith('$')\
and response.meta.get('ret', 0) < 3:
yield Request(response.url, dont_filter=True, meta={'ret': response.meta.get('ret', 0) + 1})
return
yield loader.load_item()
def parse_products2(self, response):
hxs = HtmlXPathSelector(response)
products = hxs.select('//div[@class="item"]//td[@class="col1"]/..')
for product in products:
loader = ProductLoader(item=Product(), selector=product)
loader.add_value('url', response.url)
loader.add_xpath('name', './/p[@class="para1"]/text()')
loader.add_xpath('price', './/p[@class="ourPrice"]/following-sibling::p/strong/text()')
loader.add_xpath('sku', './/p[@class="para2"]/text()', re='item #(.*)')
price = product.select('.//p[@class="ourPrice"]/following-sibling::p/strong/text()').extract()[0]
if not price.startswith('$') and response.meta.get('ret', 0) < 3:
yield Request(response.url, dont_filter=True,
meta={'ret': response.meta.get('ret', 0) + 1},
callback=self.parse_products2)
return
yield loader.load_item()
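
# Illustrative run (assumes a Scrapy project that provides product_spiders.items):
#     scrapy crawl fragrancenet.com -o products.jl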
| apache-2.0 | -4,551,588,988,013,990,000 | 39.0125 | 109 | 0.578882 | false |
aldebaran/qibuild | python/qibuild/test/test_qibuild_make_host_tools.py | 1 | 2206 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" Test QiBuild Make Host Tools """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import qibuild.find
def test_make_host_tools(qibuild_action, fake_ctc):
""" Test Make Host Tools """
footool_proj = qibuild_action.add_test_project("footool")
qibuild_action.add_test_project("usefootool")
qibuild_action("make-host-tools", "usefootool")
qibuild.find.find_bin([footool_proj.sdk_directory], "footool", expect_one=True)
qibuild_action("configure", "usefootool", "--config", "fake-ctc")
def test_recurse_deps(qibuild_action):
""" Test Recurse Deps """
footool_proj = qibuild_action.add_test_project("footool")
qibuild_action.add_test_project("usefootool")
qibuild_action.create_project("bar", run_depends=["usefootool"])
qibuild_action("make-host-tools", "bar")
qibuild.find.find_bin([footool_proj.sdk_directory], "footool", expect_one=True)
def test_building_host_tools_in_release(qibuild_action, record_messages):
""" Test Building Host Tools In Release """
qibuild_action.add_test_project("footool")
qibuild_action.add_test_project("usefootool")
record_messages.reset()
qibuild_action("make-host-tools", "--release", "usefootool")
assert record_messages.find("Building footool in Release")
qibuild_action("configure", "usefootool")
qibuild_action("make", "usefootool")
def test_no_project_specified(qibuild_action):
""" Test No Project Specified """
qibuild_action.add_test_project("footool")
usefootool_proj = qibuild_action.add_test_project("usefootool")
qibuild_action.chdir(usefootool_proj.path)
qibuild_action("make-host-tools")
qibuild_action("configure")
def test_using_dash_all(qibuild_action):
""" Test Using Dash All """
qibuild_action.add_test_project("footool")
qibuild_action.add_test_project("usefootool")
qibuild_action("make-host-tools", "--all")
qibuild_action("configure", "usefootool")
| bsd-3-clause | 4,113,590,440,546,380,000 | 38.392857 | 84 | 0.706256 | false |
quark-mcu/qm-bootloader | tools/sysupdate/qmfmlib/dfu.py | 2 | 4542 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Quark Microcontroller Firmware Update DFU Module
This module provides classes to add DFU compliance to firmware images for
Quark Microcontrollers.
"""
from __future__ import print_function, division, absolute_import
import struct
import binascii
_ENDIAN = "<" # Defines the endian for struct packing. ('<'=little, '>'=big)
class DFUException(Exception):
""" DFU Exception """
def __init__(self, message):
super(DFUException, self).__init__(message)
class DFUImage(object):
""" Creates a DFU compatible file from a binary file. """
suffix_crc = 0
dfu_spec = 0x0100 # DFU 1.0
length = 16
release = 0xFFFF
id_product = 0xFFFF
id_vendor = 0xFFFF
_suffix_struct = struct.Struct("%sHHHH3cBI" % _ENDIAN)
def __init__(self):
pass
@property
def _pack_suffix_tuple(self):
""" Tuple containing the suffix information in a defined order. """
return (
self.release,
self.id_product,
self.id_vendor,
self.dfu_spec,
'U', 'F', 'D',
self.length,
self.suffix_crc,
)
@property
def packed_dfu_suffix(self):
""" Binary representation DFU suffix. """
ret = self._suffix_struct.pack(*self._pack_suffix_tuple)
return ret
def crc(self, data, return_type=str):
""" Calculate CRC.
        CRC32 of the image data, computed over all but the last 4 bytes;
        those 4 bytes are a placeholder for the resulting CRC32. The value
        is returned as a binary string or an int, depending on `return_type`.
Args:
data (string): Binary content. (Must have 4 bytes CRC32 appended)
return_type (type): The return type of CRC value. (int or str)
Returns:
Formatted CRC value.
Raises:
            DFUException: In case of an invalid return_type or missing content.
"""
if len(data) < 4:
raise DFUException("no content available")
crc = binascii.crc32(data[:-4])
# NOTE: DFU-Util expects ~CRC
crc = ~crc & 0xffffffff
if return_type == int:
return crc
if return_type == str:
return struct.Struct("%sI" % _ENDIAN).pack(crc)
raise DFUException("unknown return type: %s" % return_type)
def add_suffix(self, data, id_product=0xFFFF, id_vendor=0xFFFF):
""" Adds DFU Suffix and the binary data.
Args:
data (string): Content of the binary file as a binary string.
id_product (int): USB / DFU Product ID.
id_vendor (int): USB / DFU Vendor ID.
Returns:
Binary data with attached DFU suffix.
"""
self.id_vendor = id_vendor
self.id_product = id_product
# Set DFU suffix.
data += self.packed_dfu_suffix
# Compute suffix CRC.
self.suffix_crc = self.crc(data, str)
return data[:-4] + self.suffix_crc
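

# Illustrative usage (not part of the original module): append a DFU suffix
# to an in-memory payload. The vendor and product IDs below are placeholders.
if __name__ == "__main__":
    image = DFUImage()
    payload = b"\x00" * 32
    suffixed = image.add_suffix(payload, id_product=0x1234, id_vendor=0x5678)
    # 32-byte payload + 16-byte DFU suffix = 48 bytes
    print("suffixed image length:", len(suffixed))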
| bsd-3-clause | 6,072,804,549,549,071,000 | 32.644444 | 79 | 0.653236 | false |
eayunstack/rally | tests/unit/plugins/common/sla/test_max_average_duration.py | 6 | 3022 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import jsonschema
from rally.plugins.common.sla import max_average_duration
from tests.unit import test
@ddt.ddt
class MaxAverageDurationTestCase(test.TestCase):
def test_config_schema(self):
properties = {
"max_avg_duration": 0
}
self.assertRaises(jsonschema.ValidationError,
max_average_duration.MaxAverageDuration.validate,
properties)
def test_result(self):
sla1 = max_average_duration.MaxAverageDuration(42)
sla2 = max_average_duration.MaxAverageDuration(3.62)
for sla in [sla1, sla2]:
sla.add_iteration({"duration": 3.14})
sla.add_iteration({"duration": 6.28})
self.assertTrue(sla1.result()["success"]) # 42 > avg([3.14, 6.28])
self.assertFalse(sla2.result()["success"]) # 3.62 < avg([3.14, 6.28])
self.assertEqual("Passed", sla1.status())
self.assertEqual("Failed", sla2.status())
def test_result_no_iterations(self):
sla = max_average_duration.MaxAverageDuration(42)
self.assertTrue(sla.result()["success"])
def test_add_iteration(self):
sla = max_average_duration.MaxAverageDuration(4.0)
self.assertTrue(sla.add_iteration({"duration": 3.5}))
self.assertTrue(sla.add_iteration({"duration": 2.5}))
self.assertTrue(sla.add_iteration({"duration": 5.0})) # avg = 3.667
self.assertFalse(sla.add_iteration({"duration": 7.0})) # avg = 4.5
self.assertTrue(sla.add_iteration({"duration": 1.0})) # avg = 3.8
@ddt.data([[1.0, 2.0, 1.5, 4.3],
[2.1, 3.4, 1.2, 6.3, 7.2, 7.0, 1.],
[1.1, 1.1, 2.2, 2.2, 3.3, 4.3]])
def test_merge(self, durations):
single_sla = max_average_duration.MaxAverageDuration(4.0)
for dd in durations:
for d in dd:
single_sla.add_iteration({"duration": d})
slas = [max_average_duration.MaxAverageDuration(4.0)
for _ in durations]
for idx, sla in enumerate(slas):
for duration in durations[idx]:
sla.add_iteration({"duration": duration})
merged_sla = slas[0]
for sla in slas[1:]:
merged_sla.merge(sla)
self.assertEqual(single_sla.success, merged_sla.success)
self.assertEqual(single_sla.avg, merged_sla.avg)
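
# Direct usage sketch (illustrative) of the SLA class under test:
#     sla = max_average_duration.MaxAverageDuration(4.0)
#     sla.add_iteration({"duration": 3.5})  # returns True while the SLA still holds
#     sla.result()                          # dict whose "success" key reports the outcome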
| apache-2.0 | 1,555,456,327,963,673,300 | 36.775 | 78 | 0.619788 | false |
JioCloud/keystone | keystone/openstack/common/versionutils.py | 7 | 4958 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helpers for comparing version strings.
"""
import functools
import pkg_resources
from keystone.openstack.common.gettextutils import _
from keystone.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class deprecated(object):
"""A decorator to mark callables as deprecated.
deprecated logs a deprecation message when the callable it decorates
is used. The message will include the release where the callable was
    deprecated, the release where it may be removed and possibly an
optional replacement.
Examples:
    1. Specifying the required deprecated release
>>> @deprecated(as_of=deprecated.ICEHOUSE)
... def a(): pass
    2. Specifying a replacement:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
... def b(): pass
3. Specifying the release where the functionality may be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
... def c(): pass
"""
FOLSOM = 'F'
GRIZZLY = 'G'
HAVANA = 'H'
ICEHOUSE = 'I'
_RELEASES = {
'F': 'Folsom',
'G': 'Grizzly',
'H': 'Havana',
'I': 'Icehouse',
}
_deprecated_msg_with_alternative = _(
'%(what)s is deprecated as of %(as_of)s in favor of '
'%(in_favor_of)s and may be removed in %(remove_in)s.')
_deprecated_msg_no_alternative = _(
'%(what)s is deprecated as of %(as_of)s and may be '
'removed in %(remove_in)s. It will not be superseded.')
def __init__(self, as_of, in_favor_of=None, remove_in=2, what=None):
"""Initialize decorator
:param as_of: the release deprecating the callable. Constants
            are defined in this class for convenience.
:param in_favor_of: the replacement for the callable (optional)
:param remove_in: an integer specifying how many releases to wait
before removing (default: 2)
:param what: name of the thing being deprecated (default: the
callable's name)
"""
self.as_of = as_of
self.in_favor_of = in_favor_of
self.remove_in = remove_in
self.what = what
def __call__(self, func):
if not self.what:
self.what = func.__name__ + '()'
@functools.wraps(func)
def wrapped(*args, **kwargs):
msg, details = self._build_message()
LOG.deprecated(msg, details)
return func(*args, **kwargs)
return wrapped
def _get_safe_to_remove_release(self, release):
        # TODO(dstanek): this method will have to be reimplemented once we
        # get to the X release, because once we get to the Y release,
        # what is Y + 2?
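        # Illustrative sketch of the arithmetic below: as_of='I' (Icehouse)
        # with remove_in=2 gives chr(ord('I') + 2) == 'K'; 'K' is not a key
        # in _RELEASES, so the raw letter is returned unchanged.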
new_release = chr(ord(release) + self.remove_in)
if new_release in self._RELEASES:
return self._RELEASES[new_release]
else:
return new_release
def _build_message(self):
details = dict(what=self.what,
as_of=self._RELEASES[self.as_of],
remove_in=self._get_safe_to_remove_release(self.as_of))
if self.in_favor_of:
details['in_favor_of'] = self.in_favor_of
msg = self._deprecated_msg_with_alternative
else:
msg = self._deprecated_msg_no_alternative
return msg, details
def is_compatible(requested_version, current_version, same_major=True):
"""Determine whether `requested_version` is satisfied by
`current_version`; in other words, `current_version` is >=
`requested_version`.
:param requested_version: version to check for compatibility
:param current_version: version to check against
:param same_major: if True, the major version must be identical between
`requested_version` and `current_version`. This is used when a
major-version difference indicates incompatibility between the two
versions. Since this is the common-case in practice, the default is
True.
:returns: True if compatible, False if not
"""
requested_parts = pkg_resources.parse_version(requested_version)
current_parts = pkg_resources.parse_version(current_version)
if same_major and (requested_parts[0] != current_parts[0]):
return False
return current_parts >= requested_parts
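

# Illustrative usage of is_compatible (a sketch added for clarity; these
# calls are not part of the original module):
#
#   is_compatible('2.0', '2.1')                    # True: 2.1 >= 2.0, same major
#   is_compatible('2.0', '3.0')                    # False: major versions differ
#   is_compatible('2.0', '3.0', same_major=False)  # True: 3.0 >= 2.0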
| apache-2.0 | -1,415,123,992,306,960,100 | 32.5 | 78 | 0.639774 | false |
baleboy/crosswalk-app-tools | lint.py | 2 | 6253 | #!/usr/bin/env python
import os
import sys
import commands
import shutil
from optparse import OptionParser
import urllib2
import re
from bs4 import BeautifulSoup
import platform
os.system("node -v")
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
crosswalk_test_suite = os.path.join(SCRIPT_PATH, "crosswalk-test-suite")
tmp = os.path.join(SCRIPT_PATH, "tmp")
apptools = os.path.join(crosswalk_test_suite, "apptools")
apptools_android_tests = os.path.join(tmp, "apptools-android-tests")
apptools_windows_tests = os.path.join(tmp, "apptools-windows-tests")
apptools_ios_tests = os.path.join(tmp, "apptools-ios-tests")
os.environ['CROSSWALK_APP_SRC'] = os.path.join(SCRIPT_PATH, "src") + "/"
returnCode = 0
if os.path.exists(crosswalk_test_suite):
os.chdir(crosswalk_test_suite)
cmd = 'git pull'
returnCode = os.system(cmd)
os.chdir(SCRIPT_PATH)
else:
cmd = 'git clone https://github.com/crosswalk-project/crosswalk-test-suite'
returnCode = os.system(cmd)
if returnCode == 1:
sys.exit(1)
if os.path.exists(tmp):
shutil.rmtree(tmp)
def crosswalk_version(channel, platform):
htmlDoc = urllib2.urlopen(
'https://download.01.org/crosswalk/releases/crosswalk/' + platform + '/' +
channel +
'/').read()
soup = BeautifulSoup(htmlDoc)
alist = soup.find_all('a')
version = ''
for index in range(-1, -len(alist)-1, -1):
aEle = alist[index]
version = aEle['href'].strip('/')
        if re.search(r'[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+', version):
break
return version
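
# Illustrative call (a sketch; the value returned depends on what is currently
# published under download.01.org, e.g. something like "23.53.589.4"):
#   latest_stable = crosswalk_version("stable", "android")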
def main():
usage = "Usage: ./lint.py -p android"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-p",
dest="platform",
help="specify the testsuit platform, e.g. android, windows, ios")
opts_parser.add_option(
"-a",
dest="arch",
help="specify the packing apk bit, e.g. 32bit, 64bit")
global BUILD_PARAMETERS
(BUILD_PARAMETERS, args) = opts_parser.parse_args()
if BUILD_PARAMETERS.platform == "android":
os.environ['CROSSWALK_APP_TOOLS_CACHE_DIR'] = os.path.join(apptools_android_tests, "tools")
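        # Collect serials of attached devices from `adb devices -l` (skipping
        # the header line and wildcard entries) so tests target real hardware
        # and the emulator can be skipped below.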
x = []
for i in list(os.popen('adb devices -l'))[1:]:
if i.strip(os.linesep) != "" and i.strip(os.linesep).split(" ")[0] != "*":
x.append(i.strip(os.linesep).split(" ")[0])
if x:
os.environ['DEVICE_ID'] = ",".join(x)
os.environ['SKIP_EMULATOR'] = "True"
android_crosswalk_version = crosswalk_version("stable", BUILD_PARAMETERS.platform)
shutil.copytree(os.path.join(apptools, "apptools-android-tests"), apptools_android_tests)
fp = open(apptools_android_tests + '/arch.txt', 'w+')
fp.write("arm")
fp.close()
if platform.system() != "Linux":
hp = open(apptools_android_tests + "/host.txt", 'w+')
hp.write("Windows")
hp.close()
else:
hp = open(apptools_android_tests + "/host.txt", 'w+')
hp.write("Android")
hp.close()
if BUILD_PARAMETERS.arch == "64bit":
vp_64 = open(apptools_android_tests + "/version.txt", 'w+')
vp_64.write(android_crosswalk_version + " 64")
vp_64.close()
os.chdir(os.path.join(apptools_android_tests, "tools"))
data = urllib2.urlopen("https://download.01.org/crosswalk/releases/crosswalk/" + BUILD_PARAMETERS.platform + "/stable/" + android_crosswalk_version + "/crosswalk-" + android_crosswalk_version + "-64bit.zip").read()
with open("crosswalk-" + android_crosswalk_version + "-64bit.zip", 'wb') as f:
f.write(data)
else:
vp_32 = open(apptools_android_tests + "/version.txt", 'w+')
vp_32.write(android_crosswalk_version + " 32")
vp_32.close()
os.chdir(os.path.join(apptools_android_tests, "tools"))
data = urllib2.urlopen("https://download.01.org/crosswalk/releases/crosswalk/" + BUILD_PARAMETERS.platform + "/stable/" + android_crosswalk_version + "/crosswalk-" + android_crosswalk_version + ".zip").read()
with open("crosswalk-" + android_crosswalk_version + ".zip", 'wb') as f:
f.write(data)
os.chdir(os.path.join(os.path.join(apptools_android_tests, "apptools"), "CI"))
if platform.system() != "Linux":
retval = os.system("python -m unittest discover --pattern=crosswalk_pkg_basic.py > null")
else:
retval = os.system("python -m unittest discover --pattern=*.py")
elif BUILD_PARAMETERS.platform == "windows":
os.environ['CROSSWALK_APP_TOOLS_CACHE_DIR'] = os.path.join(apptools_windows_tests, "tools")
shutil.copytree(os.path.join(apptools, "apptools-windows-tests"), apptools_windows_tests)
os.chdir(os.path.join(apptools_windows_tests, "tools"))
windows_crosswalk_version = crosswalk_version("canary", BUILD_PARAMETERS.platform)
try:
data = urllib2.urlopen("https://download.01.org/crosswalk/releases/crosswalk/" + BUILD_PARAMETERS.platform + "/canary/" + windows_crosswalk_version + "/crosswalk-" + windows_crosswalk_version + ".zip").read()
with open("crosswalk-" + windows_crosswalk_version + ".zip", 'wb') as f:
f.write(data)
except Exception as e:
data = urllib2.urlopen("https://download.01.org/crosswalk/releases/crosswalk/" + BUILD_PARAMETERS.platform + "/canary/" + windows_crosswalk_version + "/crosswalk64-" + windows_crosswalk_version + ".zip").read()
with open("crosswalk64-" + windows_crosswalk_version + ".zip", 'wb') as f:
f.write(data)
os.chdir(os.path.join(os.path.join(apptools_windows_tests, "apptools"), "CI"))
retval = os.system("python -m unittest discover --pattern=*.py > null")
elif BUILD_PARAMETERS.platform == "ios":
shutil.copytree(os.path.join(apptools, "apptools-ios-tests"), apptools_ios_tests)
os.chdir(os.path.join(os.path.join(apptools_ios_tests, "apptools"), "CI"))
retval = os.system("python -m unittest discover --pattern=*.py > null")
return retval
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | 3,776,121,237,849,668,600 | 46.732824 | 226 | 0.618743 | false |
fintech-circle/edx-platform | cms/djangoapps/contentstore/tests/test_libraries.py | 11 | 43471 | """
Content library unit tests that require the CMS runtime.
"""
import ddt
from django.test.utils import override_settings
from mock import Mock, patch
from opaque_keys.edx.locator import CourseKey, LibraryLocator
from contentstore.tests.utils import AjaxEnabledTestClient, parse_json
from contentstore.utils import reverse_library_url, reverse_url, reverse_usage_url
from contentstore.views.item import _duplicate_item
from contentstore.views.preview import _load_preview_module
from contentstore.views.tests.test_library import LIBRARY_REST_URL
from course_creators.views import add_user_with_status_granted
from openedx.core.djangoapps.content.course_structures.tests import SignalDisconnectTestMixin
from student import auth
from student.auth import has_studio_read_access, has_studio_write_access
from student.roles import (
CourseInstructorRole,
CourseStaffRole,
LibraryUserRole,
OrgInstructorRole,
OrgLibraryUserRole,
OrgStaffRole
)
from student.tests.factories import UserFactory
from xblock_django.user_service import DjangoXBlockUserService
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.x_module import STUDIO_VIEW
class LibraryTestCase(ModuleStoreTestCase):
"""
Common functionality for content libraries tests
"""
def setUp(self):
super(LibraryTestCase, self).setUp()
self.user = UserFactory(password=self.user_password, is_staff=True)
self.client = AjaxEnabledTestClient()
self._login_as_staff_user(logout_first=False)
self.lib_key = self._create_library()
self.library = modulestore().get_library(self.lib_key)
self.session_data = {} # Used by _bind_module
def _login_as_staff_user(self, logout_first=True):
""" Login as a staff user """
if logout_first:
self.client.logout()
self.client.login(username=self.user.username, password=self.user_password)
def _create_library(self, org="org", library="lib", display_name="Test Library"):
"""
Helper method used to create a library. Uses the REST API.
"""
response = self.client.ajax_post(LIBRARY_REST_URL, {
'org': org,
'library': library,
'display_name': display_name,
})
self.assertEqual(response.status_code, 200)
lib_info = parse_json(response)
lib_key = CourseKey.from_string(lib_info['library_key'])
self.assertIsInstance(lib_key, LibraryLocator)
return lib_key
def _add_library_content_block(self, course, library_key, publish_item=False, other_settings=None):
"""
Helper method to add a LibraryContent block to a course.
The block will be configured to select content from the library
specified by library_key.
other_settings can be a dict of Scope.settings fields to set on the block.
"""
return ItemFactory.create(
category='library_content',
parent_location=course.location,
user_id=self.user.id,
publish_item=publish_item,
source_library_id=unicode(library_key),
**(other_settings or {})
)
def _add_simple_content_block(self):
""" Adds simple HTML block to library """
return ItemFactory.create(
category="html", parent_location=self.library.location,
user_id=self.user.id, publish_item=False
)
def _refresh_children(self, lib_content_block, status_code_expected=200):
"""
Helper method: Uses the REST API to call the 'refresh_children' handler
of a LibraryContent block
"""
if 'user' not in lib_content_block.runtime._services: # pylint: disable=protected-access
user_service = DjangoXBlockUserService(self.user)
lib_content_block.runtime._services['user'] = user_service # pylint: disable=protected-access
handler_url = reverse_usage_url(
'component_handler',
lib_content_block.location,
kwargs={'handler': 'refresh_children'}
)
response = self.client.ajax_post(handler_url)
self.assertEqual(response.status_code, status_code_expected)
return modulestore().get_item(lib_content_block.location)
def _bind_module(self, descriptor, user=None):
"""
Helper to use the CMS's module system so we can access student-specific fields.
"""
if user is None:
user = self.user
if user not in self.session_data:
self.session_data[user] = {}
request = Mock(user=user, session=self.session_data[user])
_load_preview_module(request, descriptor)
def _update_item(self, usage_key, metadata):
"""
Helper method: Uses the REST API to update the fields of an XBlock.
This will result in the XBlock's editor_saved() method being called.
"""
update_url = reverse_usage_url("xblock_handler", usage_key)
return self.client.ajax_post(
update_url,
data={
'metadata': metadata,
}
)
def _list_libraries(self):
"""
Use the REST API to get a list of libraries visible to the current user.
"""
response = self.client.get_json(LIBRARY_REST_URL)
self.assertEqual(response.status_code, 200)
return parse_json(response)
@ddt.ddt
class TestLibraries(LibraryTestCase):
"""
High-level tests for libraries
"""
@ddt.data(
(2, 1, 1),
(2, 2, 2),
(2, 20, 2),
)
@ddt.unpack
def test_max_items(self, num_to_create, num_to_select, num_expected):
"""
Test the 'max_count' property of LibraryContent blocks.
"""
for _ in range(num_to_create):
self._add_simple_content_block()
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
lc_block = self._add_library_content_block(course, self.lib_key, other_settings={'max_count': num_to_select})
self.assertEqual(len(lc_block.children), 0)
lc_block = self._refresh_children(lc_block)
# Now, we want to make sure that .children has the total # of potential
# children, and that get_child_descriptors() returns the actual children
# chosen for a given student.
# In order to be able to call get_child_descriptors(), we must first
# call bind_for_student:
self._bind_module(lc_block)
self.assertEqual(len(lc_block.children), num_to_create)
self.assertEqual(len(lc_block.get_child_descriptors()), num_expected)
def test_consistent_children(self):
"""
Test that the same student will always see the same selected child block
"""
# Create many blocks in the library and add them to a course:
for num in range(8):
ItemFactory.create(
data="This is #{}".format(num + 1),
category="html", parent_location=self.library.location, user_id=self.user.id, publish_item=False
)
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
lc_block = self._add_library_content_block(course, self.lib_key, {'max_count': 1})
lc_block_key = lc_block.location
lc_block = self._refresh_children(lc_block)
def get_child_of_lc_block(block):
"""
Fetch the child shown to the current user.
"""
children = block.get_child_descriptors()
self.assertEqual(len(children), 1)
return children[0]
# Check which child a student will see:
self._bind_module(lc_block)
chosen_child = get_child_of_lc_block(lc_block)
chosen_child_defn_id = chosen_child.definition_locator.definition_id
lc_block.save()
modulestore().update_item(lc_block, self.user.id)
# Now re-load the block and try again:
def check():
"""
Confirm that chosen_child is still the child seen by the test student
"""
for _ in range(6): # Repeat many times b/c blocks are randomized
lc_block = modulestore().get_item(lc_block_key) # Reload block from the database
self._bind_module(lc_block)
current_child = get_child_of_lc_block(lc_block)
self.assertEqual(current_child.location, chosen_child.location)
self.assertEqual(current_child.data, chosen_child.data)
self.assertEqual(current_child.definition_locator.definition_id, chosen_child_defn_id)
check()
# Refresh the children:
lc_block = self._refresh_children(lc_block)
# Now re-load the block and try yet again, in case refreshing the children changed anything:
check()
def test_definition_shared_with_library(self):
"""
Test that the same block definition is used for the library and course[s]
"""
block1 = self._add_simple_content_block()
def_id1 = block1.definition_locator.definition_id
block2 = self._add_simple_content_block()
def_id2 = block2.definition_locator.definition_id
self.assertNotEqual(def_id1, def_id2)
# Next, create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
for child_key in lc_block.children:
child = modulestore().get_item(child_key)
def_id = child.definition_locator.definition_id
self.assertIn(def_id, (def_id1, def_id2))
def test_fields(self):
"""
Test that blocks used from a library have the same field values as
defined by the library author.
"""
data_value = "A Scope.content value"
name_value = "A Scope.settings value"
lib_block = ItemFactory.create(
category="html",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name=name_value,
data=data_value,
)
self.assertEqual(lib_block.data, data_value)
self.assertEqual(lib_block.display_name, name_value)
# Next, create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
course_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(course_block.data, data_value)
self.assertEqual(course_block.display_name, name_value)
def test_block_with_children(self):
"""
Test that blocks used from a library can have children.
"""
data_value = "A Scope.content value"
name_value = "A Scope.settings value"
# In the library, create a vertical block with a child:
vert_block = ItemFactory.create(
category="vertical",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
)
child_block = ItemFactory.create(
category="html",
parent_location=vert_block.location,
user_id=self.user.id,
publish_item=False,
display_name=name_value,
data=data_value,
)
self.assertEqual(child_block.data, data_value)
self.assertEqual(child_block.display_name, name_value)
# Next, create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 1)
course_vert_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(len(course_vert_block.children), 1)
course_child_block = modulestore().get_item(course_vert_block.children[0])
self.assertEqual(course_child_block.data, data_value)
self.assertEqual(course_child_block.display_name, name_value)
def test_change_after_first_sync(self):
"""
Check that nothing goes wrong if we (A) Set up a LibraryContent block
and use it successfully, then (B) Give it an invalid configuration.
No children should be deleted until the configuration is fixed.
"""
# Add a block to the library:
data_value = "Hello world!"
ItemFactory.create(
category="html",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name="HTML BLock",
data=data_value,
)
# Create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 1)
# Now, change the block settings to have an invalid library key:
resp = self._update_item(
lc_block.location,
{"source_library_id": "library-v1:NOT+FOUND"},
)
self.assertEqual(resp.status_code, 200)
lc_block = modulestore().get_item(lc_block.location)
self.assertEqual(len(lc_block.children), 1) # Children should not be deleted due to a bad setting.
html_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(html_block.data, data_value)
def test_refreshes_children_if_libraries_change(self):
""" Tests that children are automatically refreshed if libraries list changes """
library2key = self._create_library("org2", "lib2", "Library2")
library2 = modulestore().get_library(library2key)
data1, data2 = "Hello world!", "Hello other world!"
ItemFactory.create(
category="html",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name="Lib1: HTML BLock",
data=data1,
)
ItemFactory.create(
category="html",
parent_location=library2.location,
user_id=self.user.id,
publish_item=False,
display_name="Lib 2: HTML BLock",
data=data2,
)
# Create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 1)
# Now, change the block settings to have an invalid library key:
resp = self._update_item(
lc_block.location,
{"source_library_id": str(library2key)},
)
self.assertEqual(resp.status_code, 200)
lc_block = modulestore().get_item(lc_block.location)
self.assertEqual(len(lc_block.children), 1)
html_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(html_block.data, data2)
@patch("xmodule.library_tools.SearchEngine.get_search_engine", Mock(return_value=None, autospec=True))
def test_refreshes_children_if_capa_type_change(self):
""" Tests that children are automatically refreshed if capa type field changes """
name1, name2 = "Option Problem", "Multiple Choice Problem"
ItemFactory.create(
category="problem",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name=name1,
data="<problem><optionresponse></optionresponse></problem>",
)
ItemFactory.create(
category="problem",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
display_name=name2,
data="<problem><multiplechoiceresponse></multiplechoiceresponse></problem>",
)
# Create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 2)
resp = self._update_item(
lc_block.location,
{"capa_type": 'optionresponse'},
)
self.assertEqual(resp.status_code, 200)
lc_block = modulestore().get_item(lc_block.location)
self.assertEqual(len(lc_block.children), 1)
html_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(html_block.display_name, name1)
resp = self._update_item(
lc_block.location,
{"capa_type": 'multiplechoiceresponse'},
)
self.assertEqual(resp.status_code, 200)
lc_block = modulestore().get_item(lc_block.location)
self.assertEqual(len(lc_block.children), 1)
html_block = modulestore().get_item(lc_block.children[0])
self.assertEqual(html_block.display_name, name2)
def test_refresh_fails_for_unknown_library(self):
""" Tests that refresh children fails if unknown library is configured """
# Create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
# Add a LibraryContent block to the course:
lc_block = self._add_library_content_block(course, self.lib_key)
lc_block = self._refresh_children(lc_block)
self.assertEqual(len(lc_block.children), 0)
# Now, change the block settings to have an invalid library key:
resp = self._update_item(
lc_block.location,
{"source_library_id": "library-v1:NOT+FOUND"},
)
self.assertEqual(resp.status_code, 200)
with self.assertRaises(ValueError):
self._refresh_children(lc_block, status_code_expected=400)
@ddt.ddt
@patch('django.conf.settings.SEARCH_ENGINE', None)
class TestLibraryAccess(SignalDisconnectTestMixin, LibraryTestCase):
"""
Test Roles and Permissions related to Content Libraries
"""
def setUp(self):
""" Create a library, staff user, and non-staff user """
super(TestLibraryAccess, self).setUp()
self.non_staff_user_password = 'foo'
self.non_staff_user = UserFactory(password=self.non_staff_user_password, is_staff=False)
def _login_as_non_staff_user(self, logout_first=True):
""" Login as a user that starts out with no roles/permissions granted. """
if logout_first:
self.client.logout() # We start logged in as a staff user
self.client.login(username=self.non_staff_user.username, password=self.non_staff_user_password)
def _assert_cannot_create_library(self, org="org", library="libfail", expected_code=403):
""" Ensure the current user is not able to create a library. """
self.assertGreaterEqual(expected_code, 300)
response = self.client.ajax_post(
LIBRARY_REST_URL,
{'org': org, 'library': library, 'display_name': "Irrelevant"}
)
self.assertEqual(response.status_code, expected_code)
key = LibraryLocator(org=org, library=library)
self.assertEqual(modulestore().get_library(key), None)
def _can_access_library(self, library):
"""
Use the normal studio library URL to check if we have access
`library` can be a LibraryLocator or the library's root XBlock
"""
if isinstance(library, (basestring, LibraryLocator)):
lib_key = library
else:
lib_key = library.location.library_key
response = self.client.get(reverse_library_url('library_handler', unicode(lib_key)))
self.assertIn(response.status_code, (200, 302, 403))
return response.status_code == 200
def tearDown(self):
"""
Log out when done each test
"""
self.client.logout()
super(TestLibraryAccess, self).tearDown()
def test_creation(self):
"""
The user that creates a library should have instructor (admin) and staff permissions
"""
# self.library has been auto-created by the staff user.
self.assertTrue(has_studio_write_access(self.user, self.lib_key))
self.assertTrue(has_studio_read_access(self.user, self.lib_key))
# Make sure the user was actually assigned the instructor role and not just using is_staff superpowers:
self.assertTrue(CourseInstructorRole(self.lib_key).has_user(self.user))
# Now log out and ensure we are forbidden from creating a library:
self.client.logout()
self._assert_cannot_create_library(expected_code=302) # 302 redirect to login expected
# Now check that logged-in users without CourseCreator role cannot create libraries
self._login_as_non_staff_user(logout_first=False)
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}):
self._assert_cannot_create_library(expected_code=403) # 403 user is not CourseCreator
# Now check that logged-in users with CourseCreator role can create libraries
add_user_with_status_granted(self.user, self.non_staff_user)
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_CREATOR_GROUP': True}):
lib_key2 = self._create_library(library="lib2", display_name="Test Library 2")
library2 = modulestore().get_library(lib_key2)
self.assertIsNotNone(library2)
@ddt.data(
CourseInstructorRole,
CourseStaffRole,
LibraryUserRole,
)
    def test_access(self, access_role):
"""
Test the various roles that allow viewing libraries are working correctly.
"""
# At this point, one library exists, created by the currently-logged-in staff user.
# Create another library as staff:
library2_key = self._create_library(library="lib2")
# Login as non_staff_user:
self._login_as_non_staff_user()
# non_staff_user shouldn't be able to access any libraries:
lib_list = self._list_libraries()
self.assertEqual(len(lib_list), 0)
self.assertFalse(self._can_access_library(self.library))
self.assertFalse(self._can_access_library(library2_key))
# Now manually intervene to give non_staff_user access to library2_key:
access_role(library2_key).add_users(self.non_staff_user)
# Now non_staff_user should be able to access library2_key only:
lib_list = self._list_libraries()
self.assertEqual(len(lib_list), 1)
self.assertEqual(lib_list[0]["library_key"], unicode(library2_key))
self.assertTrue(self._can_access_library(library2_key))
self.assertFalse(self._can_access_library(self.library))
@ddt.data(
OrgStaffRole,
OrgInstructorRole,
OrgLibraryUserRole,
)
def test_org_based_access(self, org_access_role):
"""
Test the various roles that allow viewing all of an organization's
libraries are working correctly.
"""
# Create some libraries as the staff user:
lib_key_pacific = self._create_library(org="PacificX", library="libP")
lib_key_atlantic = self._create_library(org="AtlanticX", library="libA")
# Login as a non-staff:
self._login_as_non_staff_user()
# Now manually intervene to give non_staff_user access to all "PacificX" libraries:
org_access_role(lib_key_pacific.org).add_users(self.non_staff_user)
# Now non_staff_user should be able to access lib_key_pacific only:
lib_list = self._list_libraries()
self.assertEqual(len(lib_list), 1)
self.assertEqual(lib_list[0]["library_key"], unicode(lib_key_pacific))
self.assertTrue(self._can_access_library(lib_key_pacific))
self.assertFalse(self._can_access_library(lib_key_atlantic))
self.assertFalse(self._can_access_library(self.lib_key))
@ddt.data(True, False)
def test_read_only_role(self, use_org_level_role):
"""
Test the read-only role (LibraryUserRole and its org-level equivalent)
"""
# As staff user, add a block to self.library:
block = self._add_simple_content_block()
# Login as a non_staff_user:
self._login_as_non_staff_user()
self.assertFalse(self._can_access_library(self.library))
block_url = reverse_usage_url('xblock_handler', block.location)
def can_read_block():
""" Check if studio lets us view the XBlock in the library """
response = self.client.get_json(block_url)
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
def can_edit_block():
""" Check if studio lets us edit the XBlock in the library """
response = self.client.ajax_post(block_url)
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
def can_delete_block():
""" Check if studio lets us delete the XBlock in the library """
response = self.client.delete(block_url)
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
def can_copy_block():
""" Check if studio lets us duplicate the XBlock in the library """
response = self.client.ajax_post(reverse_url('xblock_handler'), {
'parent_locator': unicode(self.library.location),
'duplicate_source_locator': unicode(block.location),
})
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
def can_create_block():
""" Check if studio lets us make a new XBlock in the library """
response = self.client.ajax_post(reverse_url('xblock_handler'), {
'parent_locator': unicode(self.library.location), 'category': 'html',
})
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
return response.status_code == 200
# Check that we do not have read or write access to block:
self.assertFalse(can_read_block())
self.assertFalse(can_edit_block())
self.assertFalse(can_delete_block())
self.assertFalse(can_copy_block())
self.assertFalse(can_create_block())
# Give non_staff_user read-only permission:
if use_org_level_role:
OrgLibraryUserRole(self.lib_key.org).add_users(self.non_staff_user)
else:
LibraryUserRole(self.lib_key).add_users(self.non_staff_user)
self.assertTrue(self._can_access_library(self.library))
self.assertTrue(can_read_block())
self.assertFalse(can_edit_block())
self.assertFalse(can_delete_block())
self.assertFalse(can_copy_block())
self.assertFalse(can_create_block())
@ddt.data(
(LibraryUserRole, CourseStaffRole, True),
(CourseStaffRole, CourseStaffRole, True),
(None, CourseStaffRole, False),
(LibraryUserRole, None, False),
)
@ddt.unpack
def test_duplicate_across_courses(self, library_role, course_role, expected_result):
"""
Test that the REST API will correctly allow/refuse when copying
from a library with (write, read, or no) access to a course with (write or no) access.
"""
# As staff user, add a block to self.library:
block = self._add_simple_content_block()
# And create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
self._login_as_non_staff_user()
# Assign roles:
if library_role:
library_role(self.lib_key).add_users(self.non_staff_user)
if course_role:
course_role(course.location.course_key).add_users(self.non_staff_user)
# Copy block to the course:
response = self.client.ajax_post(reverse_url('xblock_handler'), {
'parent_locator': unicode(course.location),
'duplicate_source_locator': unicode(block.location),
})
self.assertIn(response.status_code, (200, 403)) # 400 would be ambiguous
duplicate_action_allowed = (response.status_code == 200)
self.assertEqual(duplicate_action_allowed, expected_result)
@ddt.data(
(LibraryUserRole, CourseStaffRole, True),
(CourseStaffRole, CourseStaffRole, True),
(None, CourseStaffRole, False),
(LibraryUserRole, None, False),
)
@ddt.unpack
def test_refresh_library_content_permissions(self, library_role, course_role, expected_result):
"""
Test that the LibraryContent block's 'refresh_children' handler will correctly
handle permissions and allow/refuse when updating its content with the latest
version of a library. We try updating from a library with (write, read, or no)
access to a course with (write or no) access.
"""
# As staff user, add a block to self.library:
self._add_simple_content_block()
# And create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
self._login_as_non_staff_user()
# Assign roles:
if library_role:
library_role(self.lib_key).add_users(self.non_staff_user)
if course_role:
course_role(course.location.course_key).add_users(self.non_staff_user)
# Try updating our library content block:
lc_block = self._add_library_content_block(course, self.lib_key)
# We must use the CMS's module system in order to get permissions checks.
self._bind_module(lc_block, user=self.non_staff_user)
lc_block = self._refresh_children(lc_block, status_code_expected=200 if expected_result else 403)
self.assertEqual(len(lc_block.children), 1 if expected_result else 0)
def test_studio_user_permissions(self):
"""
        Test that a user can attach to the problem only libraries they have access to (or which they created).
        This test was created on the basis of a bug described in these pull requests on GitHub:
https://github.com/edx/edx-platform/pull/11331
https://github.com/edx/edx-platform/pull/11611
"""
self._create_library(org='admin_org_1', library='lib_adm_1', display_name='admin_lib_1')
self._create_library(org='admin_org_2', library='lib_adm_2', display_name='admin_lib_2')
self._login_as_non_staff_user()
self._create_library(org='staff_org_1', library='lib_staff_1', display_name='staff_lib_1')
self._create_library(org='staff_org_2', library='lib_staff_2', display_name='staff_lib_2')
with modulestore().default_store(ModuleStoreEnum.Type.split):
course = CourseFactory.create()
instructor_role = CourseInstructorRole(course.id)
auth.add_users(self.user, instructor_role, self.non_staff_user)
lib_block = ItemFactory.create(
category='library_content',
parent_location=course.location,
user_id=self.non_staff_user.id,
publish_item=False
)
def _get_settings_html():
"""
Helper function to get block settings HTML
Used to check the available libraries.
"""
edit_view_url = reverse_usage_url("xblock_view_handler", lib_block.location, {"view_name": STUDIO_VIEW})
resp = self.client.get_json(edit_view_url)
self.assertEquals(resp.status_code, 200)
return parse_json(resp)['html']
self._login_as_staff_user()
staff_settings_html = _get_settings_html()
self.assertIn('staff_lib_1', staff_settings_html)
self.assertIn('staff_lib_2', staff_settings_html)
self.assertIn('admin_lib_1', staff_settings_html)
self.assertIn('admin_lib_2', staff_settings_html)
self._login_as_non_staff_user()
response = self.client.get_json(LIBRARY_REST_URL)
staff_libs = parse_json(response)
self.assertEquals(2, len(staff_libs))
non_staff_settings_html = _get_settings_html()
self.assertIn('staff_lib_1', non_staff_settings_html)
self.assertIn('staff_lib_2', non_staff_settings_html)
self.assertNotIn('admin_lib_1', non_staff_settings_html)
self.assertNotIn('admin_lib_2', non_staff_settings_html)
@ddt.ddt
@override_settings(SEARCH_ENGINE=None)
class TestOverrides(LibraryTestCase):
"""
Test that overriding block Scope.settings fields from a library in a specific course works
"""
def setUp(self):
super(TestOverrides, self).setUp()
self.original_display_name = "A Problem Block"
self.original_weight = 1
# Create a problem block in the library:
self.problem = ItemFactory.create(
category="problem",
parent_location=self.library.location,
display_name=self.original_display_name, # display_name is a Scope.settings field
weight=self.original_weight, # weight is also a Scope.settings field
user_id=self.user.id,
publish_item=False,
)
# Refresh library now that we've added something.
self.library = modulestore().get_library(self.lib_key)
# Also create a course:
with modulestore().default_store(ModuleStoreEnum.Type.split):
self.course = CourseFactory.create()
# Add a LibraryContent block to the course:
self.lc_block = self._add_library_content_block(self.course, self.lib_key)
self.lc_block = self._refresh_children(self.lc_block)
self.problem_in_course = modulestore().get_item(self.lc_block.children[0])
def test_overrides(self):
"""
Test that we can override Scope.settings values in a course.
"""
new_display_name = "Modified Problem Title"
new_weight = 10
self.problem_in_course.display_name = new_display_name
self.problem_in_course.weight = new_weight
modulestore().update_item(self.problem_in_course, self.user.id)
# Add a second LibraryContent block to the course, with no override:
lc_block2 = self._add_library_content_block(self.course, self.lib_key)
lc_block2 = self._refresh_children(lc_block2)
# Re-load the two problem blocks - one with and one without an override:
self.problem_in_course = modulestore().get_item(self.lc_block.children[0])
problem2_in_course = modulestore().get_item(lc_block2.children[0])
self.assertEqual(self.problem_in_course.display_name, new_display_name)
self.assertEqual(self.problem_in_course.weight, new_weight)
self.assertEqual(problem2_in_course.display_name, self.original_display_name)
self.assertEqual(problem2_in_course.weight, self.original_weight)
def test_reset_override(self):
"""
If we override a setting and then reset it, we should get the library value.
"""
new_display_name = "Modified Problem Title"
new_weight = 10
self.problem_in_course.display_name = new_display_name
self.problem_in_course.weight = new_weight
modulestore().update_item(self.problem_in_course, self.user.id)
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem_in_course.display_name, new_display_name)
self.assertEqual(self.problem_in_course.weight, new_weight)
# Reset:
for field_name in ["display_name", "weight"]:
self.problem_in_course.fields[field_name].delete_from(self.problem_in_course)
# Save, reload, and verify:
modulestore().update_item(self.problem_in_course, self.user.id)
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem_in_course.display_name, self.original_display_name)
self.assertEqual(self.problem_in_course.weight, self.original_weight)
def test_consistent_definitions(self):
"""
Make sure that the new child of the LibraryContent block
shares its definition with the original (self.problem).
This test is specific to split mongo.
"""
definition_id = self.problem.definition_locator.definition_id
self.assertEqual(self.problem_in_course.definition_locator.definition_id, definition_id)
# Now even if we change some Scope.settings fields and refresh, the definition should be unchanged
self.problem.weight = 20
self.problem.display_name = "NEW"
modulestore().update_item(self.problem, self.user.id)
self.lc_block = self._refresh_children(self.lc_block)
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem.definition_locator.definition_id, definition_id)
self.assertEqual(self.problem_in_course.definition_locator.definition_id, definition_id)
@ddt.data(False, True)
def test_persistent_overrides(self, duplicate):
"""
Test that when we override Scope.settings values in a course,
the override values persist even when the block is refreshed
with updated blocks from the library.
"""
new_display_name = "Modified Problem Title"
new_weight = 15
self.problem_in_course.display_name = new_display_name
self.problem_in_course.weight = new_weight
modulestore().update_item(self.problem_in_course, self.user.id)
if duplicate:
# Check that this also works when the RCB is duplicated.
self.lc_block = modulestore().get_item(
_duplicate_item(self.course.location, self.lc_block.location, self.user)
)
self.problem_in_course = modulestore().get_item(self.lc_block.children[0])
else:
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem_in_course.display_name, new_display_name)
self.assertEqual(self.problem_in_course.weight, new_weight)
# Change the settings in the library version:
self.problem.display_name = "X"
self.problem.weight = 99
new_data_value = "<problem><p>Changed data to check that non-overriden fields *do* get updated.</p></problem>"
self.problem.data = new_data_value
modulestore().update_item(self.problem, self.user.id)
self.lc_block = self._refresh_children(self.lc_block)
self.problem_in_course = modulestore().get_item(self.problem_in_course.location)
self.assertEqual(self.problem_in_course.display_name, new_display_name)
self.assertEqual(self.problem_in_course.weight, new_weight)
self.assertEqual(self.problem_in_course.data, new_data_value)
def test_duplicated_version(self):
"""
Test that if a library is updated, and the content block is duplicated,
the new block will use the old library version and not the new one.
"""
store = modulestore()
self.assertEqual(len(self.library.children), 1)
self.assertEqual(len(self.lc_block.children), 1)
# Edit the only problem in the library:
self.problem.display_name = "--changed in library--"
store.update_item(self.problem, self.user.id)
# Create an additional problem block in the library:
ItemFactory.create(
category="problem",
parent_location=self.library.location,
user_id=self.user.id,
publish_item=False,
)
# Refresh our reference to the library
self.library = store.get_library(self.lib_key)
# Refresh our reference to the block
self.lc_block = store.get_item(self.lc_block.location)
self.problem_in_course = store.get_item(self.problem_in_course.location)
# The library has changed...
self.assertEqual(len(self.library.children), 2)
# But the block hasn't.
self.assertEqual(len(self.lc_block.children), 1)
self.assertEqual(self.problem_in_course.location, self.lc_block.children[0])
self.assertEqual(self.problem_in_course.display_name, self.original_display_name)
# Duplicate self.lc_block:
duplicate = store.get_item(
_duplicate_item(self.course.location, self.lc_block.location, self.user)
)
# The duplicate should have identical children to the original:
self.assertEqual(len(duplicate.children), 1)
self.assertTrue(self.lc_block.source_library_version)
self.assertEqual(self.lc_block.source_library_version, duplicate.source_library_version)
problem2_in_course = store.get_item(duplicate.children[0])
self.assertEqual(problem2_in_course.display_name, self.original_display_name)
class TestIncompatibleModuleStore(LibraryTestCase):
"""
Tests for proper validation errors with an incompatible course modulestore.
"""
def setUp(self):
super(TestIncompatibleModuleStore, self).setUp()
# Create a course in an incompatible modulestore.
with modulestore().default_store(ModuleStoreEnum.Type.mongo):
self.course = CourseFactory.create()
# Add a LibraryContent block to the course:
self.lc_block = self._add_library_content_block(self.course, self.lib_key)
def test_incompatible_modulestore(self):
"""
Verifies that, if a user is using a modulestore that doesn't support libraries,
a validation error will be produced.
"""
validation = self.lc_block.validate()
self.assertEqual(validation.summary.type, validation.summary.ERROR)
self.assertIn(
"This course does not support content libraries.", validation.summary.text)
| agpl-3.0 | 6,542,055,603,930,049,000 | 41.744346 | 118 | 0.640013 | false |
qtekfun/htcDesire820Kernel | external/chromium_org/tools/telemetry/telemetry/core/backends/form_based_credentials_backend_unittest_base.py | 24 | 4466 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import unittest
from telemetry.core import browser_finder
from telemetry.core import util
from telemetry.unittest import simple_mock
from telemetry.unittest import options_for_unittests
from telemetry.unittest import DisabledTest
_ = simple_mock.DONT_CARE
def _GetCredentialsPath():
# TODO: This shouldn't depend on tools/perf.
credentials_path = os.path.join(util.GetChromiumSrcDir(),
'tools', 'perf', 'data', 'credentials.json')
if not os.path.exists(credentials_path):
return None
return credentials_path
class FormBasedCredentialsBackendUnitTestBase(unittest.TestCase):
def setUp(self):
self._credentials_type = None
@DisabledTest
def testRealLoginIfPossible(self):
credentials_path = _GetCredentialsPath()
if not credentials_path:
logging.warning('Credentials file not found, skipping test.')
return
options = options_for_unittests.GetCopy()
with browser_finder.FindBrowser(options).Create() as b:
b.Start()
b.credentials.credentials_path = credentials_path
if not b.credentials.CanLogin(self._credentials_type):
return
ret = b.credentials.LoginNeeded(b.tabs[0], self._credentials_type)
self.assertTrue(ret)
@DisabledTest
def testRealLoginWithDontOverrideProfileIfPossible(self):
credentials_path = _GetCredentialsPath()
if not credentials_path:
logging.warning('Credentials file not found, skipping test.')
return
options = options_for_unittests.GetCopy()
# Login once to make sure our default profile is logged in.
with browser_finder.FindBrowser(options).Create() as b:
b.Start()
b.credentials.credentials_path = credentials_path
if not b.credentials.CanLogin(self._credentials_type):
return
tab = b.tabs[0]
# Should not be logged in, since this is a fresh credentials
# instance.
self.assertFalse(b.credentials.IsLoggedIn(self._credentials_type))
# Log in.
ret = b.credentials.LoginNeeded(tab, self._credentials_type)
# Make sure login was successful.
self.assertTrue(ret)
self.assertTrue(b.credentials.IsLoggedIn(self._credentials_type))
# Reset state. Now the backend thinks we're logged out, even
# though we are logged in in our current browser session. This
# simulates the effects of running with --dont-override-profile.
b.credentials._ResetLoggedInState() # pylint: disable=W0212
# Make sure the backend thinks we're logged out.
self.assertFalse(b.credentials.IsLoggedIn(self._credentials_type))
self.assertTrue(b.credentials.CanLogin(self._credentials_type))
# Attempt to login again. This should detect that we've hit
# the 'logged in' page instead of the login form, and succeed
# instead of timing out.
ret = b.credentials.LoginNeeded(tab, self._credentials_type)
# Make sure our login attempt did in fact succeed and set the
# backend's internal state to 'logged in'.
self.assertTrue(ret)
self.assertTrue(b.credentials.IsLoggedIn(self._credentials_type))
def testLoginUsingMock(self):
raise NotImplementedError()
def _LoginUsingMock(self, backend, login_page_url, email_element_id,
password_element_id): # pylint: disable=R0201
tab = simple_mock.MockObject()
config = {'username': 'blah',
'password': 'blargh'}
tab.ExpectCall('Navigate', login_page_url)
tab.ExpectCall('EvaluateJavaScript', _).WillReturn(False)
tab.ExpectCall('EvaluateJavaScript', _).WillReturn(True)
tab.ExpectCall('EvaluateJavaScript', _).WillReturn(False)
tab.ExpectCall('WaitForDocumentReadyStateToBeInteractiveOrBetter')
def VerifyEmail(js):
assert email_element_id in js
assert 'blah' in js
tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(VerifyEmail)
def VerifyPw(js):
assert password_element_id in js
      assert 'blargh' in js
tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(VerifyPw)
def VerifySubmit(js):
assert '.submit' in js
tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(VerifySubmit)
# Checking for form still up.
tab.ExpectCall('EvaluateJavaScript', _).WillReturn(False)
backend.LoginNeeded(tab, config)
| gpl-2.0 | -3,466,527,188,530,815,000 | 33.890625 | 72 | 0.70936 | false |
AvsPmod/AvsPmod | macros/Examples/[3] Manual Telecide.py | 1 | 7720 | # This is an advanced macro designed to aid in the process of manual
# deinterlacing. In order to use this macro, you must first have a general
# understanding of the Telecide AviSynth filter, otherwise most of the stuff
# the macro does (and most of the description here) will be meaningless to
# you. You can learn everything you need to know about the Telecide filter
# from the Decomb tutorial and reference manual in the AviSynth help.
#
# The macro itself has three different modes - the first mode retrieves
# the source file and sets up a script to help determine the field order,
# the second mode creates several scripts related to the Telecide filter,
# and the third mode writes override information to a text file, line by
# line. The following is a general description of how to use the macro.
#
# To begin, first start up AvsP.exe and run the macro. It will prompt you for
# the source file, then it creates a script with the top and bottom
# fields of the video separated and stacked vertically. Go through some of
# the video frame by frame to determine the field order. When you determined
# the field order, run the macro again, this time it will prompt you for the
# field order (0 or 1), then it creates and saves four scripts. The first
# script is the source with Telecide applied, the other three scripts
# represent the frames which the filter Telecide can choose from, "current",
# "next", and "previous". Once these scripts are open, go through the
# Telecide script (the first script) frame by frame, starting from the
# beginning. If you see an interlaced frame, look at the "current",
# "next", and "previous" scripts to see if there's a better non-interlaced
# frame. If so, running the macro again will record the frame number and the
# letter "c", "n", or "p" (depending on which tab is currently selected) into
# the override text file, and show the Telecide video again, this time with
# the overridden frame. Note that running the macro when the first tab is
# selected (the Telecide tab) is not appropriate, and the macro issues a
# warning with a MsgBox.
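#
# Example of the override file contents written by the third mode
# (illustrative frame numbers; each line is "<frame> c|n|p"):
#
#   1345 c
#   1350 n
#   1498 p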
import os
if avsp.GetTabCount() == 1 and avsp.GetText() == '':
#== FIRST MODE ==#
# Get the filename of the source to de-interlace from a dialog box
filename = avsp.GetFilename(_('Open a source to Telecide'))
if filename:
# Make the script to determine field order
# Note: the filename is stored on the first line, used by the second mode
avsp.InsertText('# TELECIDE FILENAME: %s\n' % filename)
srcstring = avsp.GetSourceString(filename)
avsp.InsertText('src = %s\n' % srcstring)
avsp.InsertText(
'top = src.AssumeTFF().SeparateFields().Subtitle("order = 1")\n'
'bot = src.AssumeBFF().SeparateFields().Subtitle("order = 0")\n'
'StackVertical(top, bot)\n'
)
avsp.ShowVideoFrame(0)
elif avsp.GetTabCount() == 1 and avsp.GetText().startswith('# TELECIDE FILENAME: '):
#== SECOND MODE ==#
# Get the filename of the source from the first line of the script
firstline = avsp.GetText().split('\n')[0]
filename = firstline.split('# TELECIDE FILENAME: ')[1]
# If the filename somehow got mangled, get it again with a dialog box
if not os.path.isfile(filename):
avsp.MsgBox(_('Filename was mangled! Get it again!'), _('Error'))
filename = avsp.GetFilename(_('Open a source to Telecide'))
# Get the field order from the user, make sure it's either 0 or 1
order = avsp.GetTextEntry(_('Enter the field order:'))
try:
order = int(order)
if order not in (0,1):
avsp.MsgBox(_('Must enter either a 0 or 1!'))
except ValueError:
avsp.MsgBox(_('Must enter an integer!'))
# Make the Telecide-related scripts
if filename and order in (0,1):
# Close the field order script
avsp.CloseTab(0)
# Make the telecide override text file (empty for now)
dir, base = os.path.split(filename)
ovrName = 'telecide_override.txt'
f = open(os.path.join(dir, ovrName), 'w')
#~ f.write('\n')
f.close()
# Make the telecide script
name = os.path.join(dir, 'telecideBase.avs')
avsp.InsertText('# TELECIDE OVERRIDE NAME: %s\n' % os.path.join(dir, ovrName))
srcstring = avsp.GetSourceString(filename)
avsp.InsertText('%s\n' % srcstring)
avsp.InsertText('Telecide(order=%i, guide=1, post=0, show=true, ovr="%s")\n' % (order, ovrName))
avsp.SaveScript(name)
# Make the "current" script
name = os.path.join(dir, 'telecideCurrent.avs')
avsp.NewTab()
avsp.InsertText('%s\nSubtitle("Current")\n' % srcstring)
avsp.SaveScript(name)
# Make the "next" script
name = os.path.join(dir, 'telecideNext.avs')
avsp.NewTab()
avsp.InsertText('src = %s\n' % srcstring)
if order == 0:
avsp.InsertText('sep = src.AssumeBFF().SeparateFields()\n')
else:
avsp.InsertText('sep = src.AssumeTFF().SeparateFields()\n')
avsp.InsertText(
'first = sep.SelectEven().Trim(1,0)\n'
'second = sep.SelectOdd()\n'
'new = Interleave(first, second).Weave()\n'
'new.Subtitle("Next")\n'
)
avsp.SaveScript(name)
# Make the "previous" script
name = os.path.join(dir, 'telecidePrevious.avs')
avsp.NewTab()
avsp.InsertText('src = %s\n' % srcstring)
if order == 0:
avsp.InsertText('sep = src.AssumeBFF().SeparateFields()\n')
else:
avsp.InsertText('sep = src.AssumeTFF().SeparateFields()\n')
avsp.InsertText(
'first = sep.SelectEven()\n'
'second = sep.SelectOdd().DuplicateFrame(0).Trim(0, src.Framecount()-1)\n'
'new = Interleave(first, second).Weave()\n'
'new.Subtitle("Previous")\n'
)
avsp.SaveScript(name)
# Select the first tab and show the video preview
avsp.SelectTab(0)
avsp.ShowVideoFrame(0)
elif avsp.GetTabCount() == 4 and avsp.GetText(0).startswith('# TELECIDE OVERRIDE NAME: '):
#== THIRD MODE ==#
# Get the filename of the override text file from the first line of the script
firstline = avsp.GetText(0).split('\n')[0]
filename = firstline.split('# TELECIDE OVERRIDE NAME: ')[1]
# If the filename somehow got mangled, get it again with a dialog box
if not os.path.isfile(filename):
avsp.MsgBox(_('Override filename was mangled! Get it again!'), _('Error'))
        filename = avsp.GetFilename(_('Get the Telecide override text file'))
if filename:
# Get the index of the currently selected tab
index = avsp.GetCurrentTabIndex()
if index == 0:
# Don't write anything if base Telecide tab was selected
avsp.MsgBox(_('Not allowed to select base Telecide tab!'), _('Error'))
else:
# Create the text to write depending on which tab was selected
frame = avsp.GetFrameNumber()
if index == 1:
txt = '%s c\n' % frame
elif index == 2:
txt = '%s n\n' % frame
elif index == 3:
txt = '%s p\n' % frame
# Write the text to the override file
f = open(filename, 'a')
f.write(txt)
f.close()
# Show the video of the Telecide script
# Force the video to refresh (AviSynth script hasn't changed, but override file has)
avsp.ShowVideoFrame(index=0, forceRefresh=True)
else:
# Unknown mode
avsp.MsgBox(_('Unknown mode!'), _('Error'))
| gpl-2.0 | -3,162,405,519,899,438,600 | 48.178344 | 104 | 0.635622 | false |
stimpsonsg/moose | python/MooseDocs/commands/generate.py | 4 | 1841 | import os
import MooseDocs
import mooseutils
import logging
log = logging.getLogger(__name__)
def generate_options(parser, subparser):
"""
Command-line options for generate command.
"""
    generate_parser = subparser.add_parser('generate', help="Check that documentation exists for your application and generate the markdown documentation from the MOOSE application executable.")
generate_parser.add_argument('--locations', nargs='+', help="List of locations to consider, names should match the keys listed in the configuration file.")
return generate_parser
def generate(config_file='moosedocs.yml', generate=True, locations=None, **kwargs):
"""
Generates MOOSE system and object markdown files from the source code.
    Args:
        config_file[str]: (Default: 'moosedocs.yml') The MooseDocs project configuration file.
        generate[bool]: (Default: True) When True, generate markdown for the syntax being checked.
        locations[list]: (Default: None) If given, restrict the check to locations with these names.
"""
# Read the configuration
config = MooseDocs.load_config(config_file)
_, ext_config = MooseDocs.get_markdown_extensions(config)
ext_config = ext_config['MooseDocs.extensions.MooseMarkdown']
# Run the executable
exe = MooseDocs.abspath(ext_config['executable'])
if not os.path.exists(exe):
raise IOError('The executable does not exist: {}'.format(exe))
else:
log.debug("Executing {} to extract syntax.".format(exe))
raw = mooseutils.runExe(exe, '--yaml')
yaml = mooseutils.MooseYaml(raw)
# Populate the syntax
for loc in ext_config['locations']:
for key, value in loc.iteritems():
            if (locations is None) or (key in locations):
value['group'] = key
syntax = MooseDocs.MooseApplicationSyntax(yaml, generate=generate, install=ext_config['install'], **value)
log.info("Checking documentation for '{}'.".format(key))
syntax.check()
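# Illustrative usage (a sketch; assumes a moosedocs.yml in the working
# directory whose config points at a compiled MOOSE application executable):
# generate(config_file='moosedocs.yml', locations=['framework'])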
| lgpl-2.1 | 5,474,276,869,488,271,000 | 40.840909 | 190 | 0.677892 | false |
jeromecc/doctoctocbot | src/conversation/tree/descendant.py | 1 | 7925 | import logging
from typing import Optional
import queue
import threading
import sys
import pytz
from datetime import datetime, timedelta
import time
from constance import config
from bot.lib.statusdb import Addstatus
from conversation.models import Treedj, Tweetdj
from conversation.models import create_leaf, create_tree
from community.helpers import get_community_twitter_tweepy_api
from community.models import Community
from bot.addstatusdj import addstatus
from conversation.tree.utils import get_community_roots
logger = logging.getLogger(__name__)
class ReplyCrawler(object):
"""
    A class used to represent a Twitter tree crawler based on the search API
...
Attributes
----------
community : Community
        a Community object; the crawler will build status trees of this community
api : Tweepy API object
Tweepy API object
root_id : int
status id of the root of the tree
q : PriorityQueue
unused
since_id: int
search will start from this id
tree_init: List[int]
list of ids of descendants already known for this tree root
Methods
-------
TODO
"""
max_count = 100
def __init__(self, community):
self.community = community
self.api = get_community_twitter_tweepy_api(
self.community,
backend=False
)
#self.root_id = root_id
#self.q = queue.PriorityQueue(maxsize=0)
#self.since_id = root_id
#self.tree_init = ReplyCrawler.root_descendants(
# root_id,
# include_self=True
#)
@staticmethod
def root_descendants(root_id, include_self: bool = True):
"""Return list of descendants' ids
"""
try:
root = Treedj.objects.get(statusid=root_id)
except Treedj.DoesNotExist:
root = create_tree(root_id)
if root:
return sorted(
list(
root.get_descendants(include_self=include_self)
.values_list("statusid", flat=True)
)
)
def get_screen_name(self, status_id):
try:
status = Tweetdj.objects.get(statusid=status_id)
return status.json["user"]["screen_name"]
except Tweetdj.DoesNotExist:
logger.debug(f"Tweet {status_id} is not present in database.")
addstatus(status_id, self.community.account.username)
if Tweetdj.objects.filter(statusid=status_id).exists():
return self.get_screen_name(status_id)
except TypeError:
if status.json is None:
logger.debug(f"Tweet {status_id} json field is null.")
return
except KeyError:
logger.debug(f"Tweet {status_id} json is buggy.")
return
def get_replies(self, status_id, since_id=None):
screen_name = self.get_screen_name(status_id)
if not since_id:
since_id = status_id
replies = self.search_reply_to(
screen_name,
community=self.community,
since_id = since_id
)
logger.debug(f"{[(r.id, r.text) for r in replies]}")
        if len(replies) == ReplyCrawler.max_count:
            # Keep paginating until a short (final) page of replies arrives
            latest = replies
            while True:
                # Highest status id seen on the most recent page
                hi = max(reply.id for reply in latest)
                more_replies = self.search_reply_to(
                    screen_name,
                    community=self.community,
                    since_id=hi
                )
                if not more_replies:
                    break
                replies.extend(more_replies)
                if len(more_replies) < ReplyCrawler.max_count:
                    break
                latest = more_replies
        return replies
def add_leaves(self, root_id):
try:
root = Treedj.objects.get(statusid=root_id)
except Treedj.DoesNotExist:
return
replies = self.get_replies(root_id, since_id=root_id)
if not replies:
return
root_descendants = ReplyCrawler.root_descendants(
root.statusid,
include_self=True
)
logger.debug(f"{root_descendants=}")
# sort replies by id, by chronological order
replies.sort(key = lambda i: i.id)
for r in replies:
if r.in_reply_to_status_id in root_descendants:
create_leaf(r.id, r.in_reply_to_status_id)
return root
def build_tree(self, root_id):
root = self.add_leaves(root_id)
if not root:
return
# Get existing descendants
root_descendants = ReplyCrawler.root_descendants(
root.statusid,
include_self=True
)
# Search for new leaves
for r in root_descendants:
self.add_leaves(r)
tree_current = ReplyCrawler.root_descendants(
root_id,
include_self=True
)
# Add new nodes to status database
new_nodes = list(set(tree_current) - set(root_descendants))
if new_nodes:
self.lookup_status(new_nodes)
def lookup_status(self, ids):
def paginator(seq, rowlen):
for start in range(0, len(seq), rowlen):
yield seq[start:start+rowlen]
for _ids in paginator(ids, 99):
statuses = self.api.statuses_lookup(_ids, tweet_mode="extended")
for status in statuses:
db = Addstatus(status._json)
db.addtweetdj()
"""
def enqueue_nodes(self, replies):
for reply in replies:
self.q.put(reply.id)
"""
def search_reply_to(
self,
screen_name: str,
community: Community,
since_id: Optional[int] = None,
):
"""Return SearchObject list containing replies to screen_name
Replies are limited to last 7 days.
"""
q = f"@{screen_name}"
try:
return self.api.search(
q=q,
since_id=since_id,
include_entities=True,
count=ReplyCrawler.max_count
)
except AttributeError:
logger.error("Probable Tweepy API error.")
def tree_search_crawl(
community_name,
days: int = (
config.conversation__tree__descendant__tree_search_crawl__days
)
):
try:
community = Community.objects.get(name=community_name)
except Community.DoesNotExist:
return False
# get tree roots for this community created less than 7 days ago
lower_dt = datetime.utcnow().replace(tzinfo=pytz.utc) - timedelta(days=days)
logger.debug(f"{lower_dt=}")
roots = get_community_roots(community, lower_dt=lower_dt)
if not roots:
backoff = community.no_root_backoff
if not backoff:
backoff = config.community__models__no_root_backoff__default
logger.debug(f"No root found. Sleeping for {backoff} seconds.")
time.sleep(backoff)
return True
rc = ReplyCrawler(community)
start_dt = datetime.utcnow()
logger.info(
f"Starting tree_search_crawl loop over {len(roots)} root nodes "
f"at {start_dt} UTC."
)
for root in roots:
rc.build_tree(root)
stop_dt = datetime.utcnow()
logger.info(
f"Ending tree_search_crawl loop over {len(roots)} root nodes "
f"at {stop_dt} UTC. \n"
f"(Loop had started at {start_dt} UTC.)"
)
    retry = community.tree_search_retry
    if not retry:
        retry = config.community__models__tree_search_retry__default
    # retry is expressed in seconds; compare it with the loop's elapsed time
    diff = retry - (stop_dt - start_dt).total_seconds()
    if diff > 0:
        logger.debug(
            f"Execution time shorter than retry minimum. "
            f"Sleeping for {diff} seconds."
        )
        time.sleep(diff)
    return True
return True | mpl-2.0 | -4,849,885,340,449,427,000 | 30.959677 | 82 | 0.571483 | false |
buildbot/buildbot | master/buildbot/test/fake/botmaster.py | 5 | 1742 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from buildbot.process import botmaster
from buildbot.util import service
class FakeBotMaster(service.AsyncMultiService, botmaster.LockRetrieverMixin):
def __init__(self):
super().__init__()
self.setName("fake-botmaster")
self.builders = {} # dictionary mapping worker names to builders
self.buildsStartedForWorkers = []
        self.delayShutdown = False
        self.shuttingDown = False
def getBuildersForWorker(self, workername):
return self.builders.get(workername, [])
def maybeStartBuildsForWorker(self, workername):
self.buildsStartedForWorkers.append(workername)
def maybeStartBuildsForAllBuilders(self):
self.buildsStartedForWorkers += self.builders.keys()
def workerLost(self, bot):
pass
def cleanShutdown(self, quickMode=False, stopReactor=True):
self.shuttingDown = True
if self.delayShutdown:
self.shutdownDeferred = defer.Deferred()
return self.shutdownDeferred
return None
| gpl-2.0 | 204,329,116,648,731,100 | 34.55102 | 79 | 0.725603 | false |
OCHA-DAP/hdx-python-api | src/hdx/data/showcase.py | 1 | 14611 | # -*- coding: utf-8 -*-
"""Showcase class containing all logic for creating, checking, and updating showcases."""
import logging
import sys
from os.path import join
from typing import List, Union, Optional, Dict, Any, Tuple
from hdx.utilities import is_valid_uuid
from hdx.utilities.dictandlist import merge_two_dictionaries
import hdx.data.dataset
import hdx.data.hdxobject
import hdx.data.vocabulary
from hdx.hdx_configuration import Configuration
logger = logging.getLogger(__name__)
class Showcase(hdx.data.hdxobject.HDXObject):
"""Showcase class containing all logic for creating, checking, and updating showcases.
Args:
initial_data (Optional[Dict]): Initial showcase metadata dictionary. Defaults to None.
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
"""
max_int = sys.maxsize
dataset_ids_field = 'dataset_ids'
def __init__(self, initial_data=None, configuration=None):
# type: (Optional[Dict], Optional[Configuration]) -> None
if not initial_data:
initial_data = dict()
super(Showcase, self).__init__(initial_data, configuration=configuration)
@staticmethod
def actions():
# type: () -> Dict[str, str]
"""Dictionary of actions that can be performed on object
Returns:
Dict[str, str]: Dictionary of actions that can be performed on object
"""
return {
'show': 'ckanext_showcase_show',
'update': 'ckanext_showcase_update',
'create': 'ckanext_showcase_create',
'delete': 'ckanext_showcase_delete',
'list': 'ckanext_showcase_list',
'associate': 'ckanext_showcase_package_association_create',
'disassociate': 'ckanext_showcase_package_association_delete',
'list_datasets': 'ckanext_showcase_package_list',
'list_showcases': 'ckanext_package_showcase_list'
}
def update_from_yaml(self, path=join('config', 'hdx_showcase_static.yml')):
# type: (str) -> None
"""Update showcase metadata with static metadata from YAML file
Args:
path (Optional[str]): Path to YAML dataset metadata. Defaults to config/hdx_showcase_static.yml.
Returns:
None
"""
super(Showcase, self).update_from_yaml(path)
def update_from_json(self, path=join('config', 'hdx_showcase_static.json')):
# type: (str) -> None
"""Update showcase metadata with static metadata from JSON file
Args:
path (Optional[str]): Path to JSON dataset metadata. Defaults to config/hdx_showcase_static.json.
Returns:
None
"""
super(Showcase, self).update_from_json(path)
@classmethod
def read_from_hdx(cls, identifier, configuration=None):
# type: (str, Optional[Configuration]) -> Optional['Showcase']
"""Reads the showcase given by identifier from HDX and returns Showcase object
Args:
identifier (str): Identifier of showcase
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
Returns:
Optional[Showcase]: Showcase object if successful read, None if not
"""
return cls._read_from_hdx_class('showcase', identifier, configuration)
def check_required_fields(self, ignore_fields=list()):
# type: (List[str]) -> None
"""Check that metadata for showcase is complete. The parameter ignore_fields should
be set if required to any fields that should be ignored for the particular operation.
Args:
ignore_fields (List[str]): Fields to ignore. Default is [].
Returns:
None
"""
self._check_required_fields('showcase', ignore_fields)
def update_in_hdx(self, **kwargs):
# type: (Any) -> None
"""Check if showcase exists in HDX and if so, update it
Returns:
None
"""
self._check_load_existing_object('showcase', 'name')
# We load an existing object even though it may well have been loaded already
# to prevent an admittedly unlikely race condition where someone has updated
# the object in the intervening time
merge_two_dictionaries(self.data, self.old_data)
self.clean_tags()
self._hdx_update('showcase', 'name', force_active=True, **kwargs)
self._update_in_hdx('showcase', 'name', **kwargs)
def create_in_hdx(self, **kwargs):
# type: (Any) -> None
"""Check if showcase exists in HDX and if so, update it, otherwise create it
Returns:
None
"""
if 'ignore_check' not in kwargs: # allow ignoring of field checks
self.check_required_fields()
if 'name' in self.data and self._load_from_hdx('showcase', self.data['name']):
logger.warning('%s exists. Updating %s' % ('showcase', self.data['name']))
merge_two_dictionaries(self.data, self.old_data)
self.clean_tags()
self._hdx_update('showcase', 'name', force_active=True, **kwargs)
else:
self.clean_tags()
self._save_to_hdx('create', 'title', force_active=True)
self._create_in_hdx('showcase', 'name', 'title', **kwargs)
def delete_from_hdx(self):
# type: () -> None
"""Deletes a showcase from HDX.
Returns:
None
"""
self._delete_from_hdx('showcase', 'id')
def get_tags(self):
# type: () -> List[str]
"""Return the dataset's list of tags
Returns:
List[str]: List of tags or [] if there are none
"""
return self._get_tags()
def add_tag(self, tag, log_deleted=True):
# type: (str, bool) -> Tuple[List[str], List[str]]
"""Add a tag
Args:
tag (str): Tag to add
log_deleted (bool): Whether to log informational messages about deleted tags. Defaults to True.
Returns:
Tuple[List[str], List[str]]: Tuple containing list of added tags and list of deleted tags and tags not added
"""
return hdx.data.vocabulary.Vocabulary.add_mapped_tag(self, tag, log_deleted=log_deleted)
def add_tags(self, tags, log_deleted=True):
# type: (List[str], bool) -> Tuple[List[str], List[str]]
"""Add a list of tags
Args:
tags (List[str]): List of tags to add
log_deleted (bool): Whether to log informational messages about deleted tags. Defaults to True.
Returns:
Tuple[List[str], List[str]]: Tuple containing list of added tags and list of deleted tags and tags not added
"""
return hdx.data.vocabulary.Vocabulary.add_mapped_tags(self, tags, log_deleted=log_deleted)
def clean_tags(self, log_deleted=True):
# type: (bool) -> Tuple[List[str], List[str]]
"""Clean tags in an HDX object according to tags cleanup spreadsheet
Args:
log_deleted (bool): Whether to log informational messages about deleted tags. Defaults to True.
Returns:
Tuple[List[str], List[str]]: Tuple containing list of mapped tags and list of deleted tags and tags not added
"""
return hdx.data.vocabulary.Vocabulary.clean_tags(self, log_deleted=log_deleted)
def remove_tag(self, tag):
# type: (str) -> bool
"""Remove a tag
Args:
tag (str): Tag to remove
Returns:
bool: True if tag removed or False if not
"""
return self._remove_hdxobject(self.data.get('tags'), tag.lower(), matchon='name')
def get_datasets(self):
# type: () -> List[hdx.data.dataset.Dataset]
"""Get any datasets in the showcase
Returns:
List[Dataset]: List of datasets
"""
assoc_result, datasets_dicts = self._read_from_hdx('showcase', self.data['id'], fieldname='showcase_id',
action=self.actions()['list_datasets'])
datasets = list()
if assoc_result:
for dataset_dict in datasets_dicts:
dataset = hdx.data.dataset.Dataset(dataset_dict, configuration=self.configuration)
datasets.append(dataset)
return datasets
def _get_showcase_dataset_dict(self, dataset):
# type: (Union[hdx.data.dataset.Dataset,Dict,str]) -> Dict
"""Get showcase dataset dict
Args:
showcase (Union[Showcase,Dict,str]): Either a showcase id or Showcase metadata from a Showcase object or dictionary
Returns:
Dict: showcase dataset dict
"""
if isinstance(dataset, hdx.data.dataset.Dataset) or isinstance(dataset, dict):
if 'id' not in dataset:
dataset = hdx.data.dataset.Dataset.read_from_hdx(dataset['name'])
dataset = dataset['id']
elif not isinstance(dataset, str):
raise hdx.data.hdxobject.HDXError('Type %s cannot be added as a dataset!' % type(dataset).__name__)
if is_valid_uuid(dataset) is False:
raise hdx.data.hdxobject.HDXError('%s is not a valid dataset id!' % dataset)
return {'showcase_id': self.data['id'], 'package_id': dataset}
def add_dataset(self, dataset, datasets_to_check=None):
# type: (Union[hdx.data.dataset.Dataset,Dict,str], List[hdx.data.dataset.Dataset]) -> bool
"""Add a dataset
Args:
dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata either from a Dataset object or a dictionary
datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.
Returns:
bool: True if the dataset was added, False if already present
"""
showcase_dataset = self._get_showcase_dataset_dict(dataset)
if datasets_to_check is None:
datasets_to_check = self.get_datasets()
for dataset in datasets_to_check:
if showcase_dataset['package_id'] == dataset['id']:
return False
self._write_to_hdx('associate', showcase_dataset, 'package_id')
return True
def add_datasets(self, datasets, datasets_to_check=None):
# type: (List[Union[hdx.data.dataset.Dataset,Dict,str]], List[hdx.data.dataset.Dataset]) -> bool
"""Add multiple datasets
Args:
datasets (List[Union[Dataset,Dict,str]]): A list of either dataset ids or dataset metadata from Dataset objects or dictionaries
datasets_to_check (List[Dataset]): List of datasets against which to check existence of dataset. Defaults to datasets in showcase.
Returns:
bool: True if all datasets added or False if any already present
"""
if datasets_to_check is None:
datasets_to_check = self.get_datasets()
alldatasetsadded = True
for dataset in datasets:
if not self.add_dataset(dataset, datasets_to_check=datasets_to_check):
alldatasetsadded = False
return alldatasetsadded
def remove_dataset(self, dataset):
# type: (Union[hdx.data.dataset.Dataset,Dict,str]) -> None
"""Remove a dataset
Args:
dataset (Union[Dataset,Dict,str]): Either a dataset id or dataset metadata either from a Dataset object or a dictionary
Returns:
None
"""
self._write_to_hdx('disassociate', self._get_showcase_dataset_dict(dataset), 'package_id')
@classmethod
def search_in_hdx(cls, query='*:*', configuration=None, page_size=1000, **kwargs):
# type: (Optional[str], Optional[Configuration], int, Any) -> List['Showcase']
"""Searches for datasets in HDX
Args:
query (Optional[str]): Query (in Solr format). Defaults to '*:*'.
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
page_size (int): Size of page to return. Defaults to 1000.
**kwargs: See below
fq (string): Any filter queries to apply
sort (string): Sorting of the search results. Defaults to 'relevance asc, metadata_modified desc'.
rows (int): Number of matching rows to return. Defaults to all datasets (sys.maxsize).
start (int): Offset in the complete result for where the set of returned datasets should begin
facet (string): Whether to enable faceted results. Default to True.
facet.mincount (int): Minimum counts for facet fields should be included in the results
facet.limit (int): Maximum number of values the facet fields return (- = unlimited). Defaults to 50.
facet.field (List[str]): Fields to facet upon. Default is empty.
use_default_schema (bool): Use default package schema instead of custom schema. Defaults to False.
Returns:
List[Dataset]: list of datasets resulting from query
"""
curfq = kwargs.get('fq')
kwargs['fq'] = 'dataset_type:showcase'
if curfq:
kwargs['fq'] = '%s AND %s' % (kwargs['fq'], curfq)
datasets = hdx.data.dataset.Dataset.search_in_hdx(query=query, configuration=configuration,
page_size=page_size, **kwargs)
showcases = list()
for dataset in datasets:
showcase = Showcase(configuration=configuration)
showcase.data = dataset.data
showcase.old_data = dataset.old_data
showcases.append(showcase)
return showcases
@classmethod
def get_all_showcases(cls, configuration=None, page_size=1000, **kwargs):
# type: (Optional[Configuration], int, Any) -> List['Showcase']
"""Get all showcases in HDX
Args:
configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
page_size (int): Size of page to return. Defaults to 1000.
**kwargs: See below
rows (int): Number of rows to return. Defaults to all showcases (sys.maxsize)
start (int): Offset in the complete result for where the set of returned showcases should begin
Returns:
List[Showcase]: list of all showcases in HDX
"""
return cls.search_in_hdx(configuration=configuration, page_size=page_size, **kwargs)
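# Illustrative usage sketch (the name, title and dataset id below are
# hypothetical, and the global HDX Configuration must be set up first):
# showcase = Showcase({'name': 'my-showcase', 'title': 'My Showcase'})
# showcase.create_in_hdx()
# showcase.add_dataset('90c7aeeb-dfd4-4032-b2c7-9b3c6a2d1d19')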
| mit | 3,910,999,517,759,824,400 | 40.86533 | 142 | 0.616659 | false |
NikNitro/Python-iBeacon-Scan | sympy/parsing/sympy_parser.py | 29 | 32979 | """Transform a string with Python-like source code into SymPy expression. """
from __future__ import print_function, division
from .sympy_tokenize import \
generate_tokens, untokenize, TokenError, \
NUMBER, STRING, NAME, OP, ENDMARKER
from keyword import iskeyword
import ast
import re
import unicodedata
import sympy
from sympy.core.compatibility import exec_, StringIO
from sympy.core.basic import Basic
_re_repeated = re.compile(r"^(\d*)\.(\d*)\[(\d+)\]$")
def _token_splittable(token):
"""
Predicate for whether a token name can be split into multiple tokens.
A token is splittable if it does not contain an underscore character and
it is not the name of a Greek letter. This is used to implicitly convert
expressions like 'xyz' into 'x*y*z'.
"""
if '_' in token:
return False
else:
try:
return not unicodedata.lookup('GREEK SMALL LETTER ' + token)
except KeyError:
pass
if len(token) > 1:
return True
return False
def _token_callable(token, local_dict, global_dict, nextToken=None):
"""
Predicate for whether a token name represents a callable function.
Essentially wraps ``callable``, but looks up the token name in the
locals and globals.
"""
func = local_dict.get(token[1])
if not func:
func = global_dict.get(token[1])
return callable(func) and not isinstance(func, sympy.Symbol)
def _add_factorial_tokens(name, result):
if result == [] or result[-1][1] == '(':
raise TokenError()
beginning = [(NAME, name), (OP, '(')]
end = [(OP, ')')]
diff = 0
length = len(result)
for index, token in enumerate(result[::-1]):
toknum, tokval = token
i = length - index - 1
if tokval == ')':
diff += 1
elif tokval == '(':
diff -= 1
if diff == 0:
if i - 1 >= 0 and result[i - 1][0] == NAME:
return result[:i - 1] + beginning + result[i - 1:] + end
else:
return result[:i] + beginning + result[i:] + end
return result
class AppliedFunction(object):
"""
A group of tokens representing a function and its arguments.
`exponent` is for handling the shorthand sin^2, ln^2, etc.
"""
def __init__(self, function, args, exponent=None):
if exponent is None:
exponent = []
self.function = function
self.args = args
self.exponent = exponent
self.items = ['function', 'args', 'exponent']
def expand(self):
"""Return a list of tokens representing the function"""
result = []
result.append(self.function)
result.extend(self.args)
return result
def __getitem__(self, index):
return getattr(self, self.items[index])
def __repr__(self):
return "AppliedFunction(%s, %s, %s)" % (self.function, self.args,
self.exponent)
class ParenthesisGroup(list):
"""List of tokens representing an expression in parentheses."""
pass
def _flatten(result):
result2 = []
for tok in result:
if isinstance(tok, AppliedFunction):
result2.extend(tok.expand())
else:
result2.append(tok)
return result2
def _group_parentheses(recursor):
def _inner(tokens, local_dict, global_dict):
"""Group tokens between parentheses with ParenthesisGroup.
Also processes those tokens recursively.
"""
result = []
stacks = []
stacklevel = 0
for token in tokens:
if token[0] == OP:
if token[1] == '(':
stacks.append(ParenthesisGroup([]))
stacklevel += 1
elif token[1] == ')':
stacks[-1].append(token)
stack = stacks.pop()
if len(stacks) > 0:
# We don't recurse here since the upper-level stack
# would reprocess these tokens
stacks[-1].extend(stack)
else:
# Recurse here to handle nested parentheses
# Strip off the outer parentheses to avoid an infinite loop
inner = stack[1:-1]
inner = recursor(inner,
local_dict,
global_dict)
parenGroup = [stack[0]] + inner + [stack[-1]]
result.append(ParenthesisGroup(parenGroup))
stacklevel -= 1
continue
if stacklevel:
stacks[-1].append(token)
else:
result.append(token)
if stacklevel:
raise TokenError("Mismatched parentheses")
return result
return _inner
def _apply_functions(tokens, local_dict, global_dict):
"""Convert a NAME token + ParenthesisGroup into an AppliedFunction.
Note that ParenthesisGroups, if not applied to any function, are
converted back into lists of tokens.
"""
result = []
symbol = None
for tok in tokens:
if tok[0] == NAME:
symbol = tok
result.append(tok)
elif isinstance(tok, ParenthesisGroup):
if symbol and _token_callable(symbol, local_dict, global_dict):
result[-1] = AppliedFunction(symbol, tok)
symbol = None
else:
result.extend(tok)
else:
symbol = None
result.append(tok)
return result
def _implicit_multiplication(tokens, local_dict, global_dict):
"""Implicitly adds '*' tokens.
Cases:
- Two AppliedFunctions next to each other ("sin(x)cos(x)")
- AppliedFunction next to an open parenthesis ("sin x (cos x + 1)")
    - A close parenthesis next to an AppliedFunction ("(x+2)sin x")
- A close parenthesis next to an open parenthesis ("(x+2)(x+3)")
- AppliedFunction next to an implicitly applied function ("sin(x)cos x")
"""
result = []
for tok, nextTok in zip(tokens, tokens[1:]):
result.append(tok)
if (isinstance(tok, AppliedFunction) and
isinstance(nextTok, AppliedFunction)):
result.append((OP, '*'))
elif (isinstance(tok, AppliedFunction) and
nextTok[0] == OP and nextTok[1] == '('):
# Applied function followed by an open parenthesis
result.append((OP, '*'))
elif (tok[0] == OP and tok[1] == ')' and
isinstance(nextTok, AppliedFunction)):
# Close parenthesis followed by an applied function
result.append((OP, '*'))
elif (tok[0] == OP and tok[1] == ')' and
nextTok[0] == NAME):
# Close parenthesis followed by an implicitly applied function
result.append((OP, '*'))
elif (tok[0] == nextTok[0] == OP
and tok[1] == ')' and nextTok[1] == '('):
# Close parenthesis followed by an open parenthesis
result.append((OP, '*'))
elif (isinstance(tok, AppliedFunction) and nextTok[0] == NAME):
# Applied function followed by implicitly applied function
result.append((OP, '*'))
elif (tok[0] == NAME and
not _token_callable(tok, local_dict, global_dict) and
nextTok[0] == OP and nextTok[1] == '('):
# Constant followed by parenthesis
result.append((OP, '*'))
elif (tok[0] == NAME and
not _token_callable(tok, local_dict, global_dict) and
nextTok[0] == NAME and
not _token_callable(nextTok, local_dict, global_dict)):
# Constant followed by constant
result.append((OP, '*'))
elif (tok[0] == NAME and
not _token_callable(tok, local_dict, global_dict) and
(isinstance(nextTok, AppliedFunction) or nextTok[0] == NAME)):
# Constant followed by (implicitly applied) function
result.append((OP, '*'))
if tokens:
result.append(tokens[-1])
return result
def _implicit_application(tokens, local_dict, global_dict):
"""Adds parentheses as needed after functions."""
result = []
appendParen = 0 # number of closing parentheses to add
skip = 0 # number of tokens to delay before adding a ')' (to
# capture **, ^, etc.)
exponentSkip = False # skipping tokens before inserting parentheses to
# work with function exponentiation
for tok, nextTok in zip(tokens, tokens[1:]):
result.append(tok)
if (tok[0] == NAME and
nextTok[0] != OP and
nextTok[0] != ENDMARKER):
if _token_callable(tok, local_dict, global_dict, nextTok):
result.append((OP, '('))
appendParen += 1
# name followed by exponent - function exponentiation
elif (tok[0] == NAME and nextTok[0] == OP and nextTok[1] == '**'):
if _token_callable(tok, local_dict, global_dict):
exponentSkip = True
elif exponentSkip:
# if the last token added was an applied function (i.e. the
# power of the function exponent) OR a multiplication (as
# implicit multiplication would have added an extraneous
# multiplication)
if (isinstance(tok, AppliedFunction)
or (tok[0] == OP and tok[1] == '*')):
# don't add anything if the next token is a multiplication
# or if there's already a parenthesis (if parenthesis, still
# stop skipping tokens)
if not (nextTok[0] == OP and nextTok[1] == '*'):
if not(nextTok[0] == OP and nextTok[1] == '('):
result.append((OP, '('))
appendParen += 1
exponentSkip = False
elif appendParen:
if nextTok[0] == OP and nextTok[1] in ('^', '**', '*'):
skip = 1
continue
if skip:
skip -= 1
continue
result.append((OP, ')'))
appendParen -= 1
if tokens:
result.append(tokens[-1])
if appendParen:
result.extend([(OP, ')')] * appendParen)
return result
def function_exponentiation(tokens, local_dict, global_dict):
"""Allows functions to be exponentiated, e.g. ``cos**2(x)``.
Examples
========
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, function_exponentiation)
>>> transformations = standard_transformations + (function_exponentiation,)
>>> parse_expr('sin**4(x)', transformations=transformations)
sin(x)**4
"""
result = []
exponent = []
consuming_exponent = False
level = 0
for tok, nextTok in zip(tokens, tokens[1:]):
if tok[0] == NAME and nextTok[0] == OP and nextTok[1] == '**':
if _token_callable(tok, local_dict, global_dict):
consuming_exponent = True
elif consuming_exponent:
exponent.append(tok)
# only want to stop after hitting )
if tok[0] == nextTok[0] == OP and tok[1] == ')' and nextTok[1] == '(':
consuming_exponent = False
# if implicit multiplication was used, we may have )*( instead
if tok[0] == nextTok[0] == OP and tok[1] == '*' and nextTok[1] == '(':
consuming_exponent = False
del exponent[-1]
continue
elif exponent and not consuming_exponent:
if tok[0] == OP:
if tok[1] == '(':
level += 1
elif tok[1] == ')':
level -= 1
if level == 0:
result.append(tok)
result.extend(exponent)
exponent = []
continue
result.append(tok)
if tokens:
result.append(tokens[-1])
if exponent:
result.extend(exponent)
return result
def split_symbols_custom(predicate):
"""Creates a transformation that splits symbol names.
``predicate`` should return True if the symbol name is to be split.
For instance, to retain the default behavior but avoid splitting certain
symbol names, a predicate like this would work:
>>> from sympy.parsing.sympy_parser import (parse_expr, _token_splittable,
... standard_transformations, implicit_multiplication,
... split_symbols_custom)
>>> def can_split(symbol):
... if symbol not in ('list', 'of', 'unsplittable', 'names'):
... return _token_splittable(symbol)
... return False
...
>>> transformation = split_symbols_custom(can_split)
>>> parse_expr('unsplittable', transformations=standard_transformations +
... (transformation, implicit_multiplication))
unsplittable
"""
def _split_symbols(tokens, local_dict, global_dict):
result = []
split = False
split_previous=False
for tok in tokens:
if split_previous:
# throw out closing parenthesis of Symbol that was split
split_previous=False
continue
split_previous=False
if tok[0] == NAME and tok[1] == 'Symbol':
split = True
elif split and tok[0] == NAME:
symbol = tok[1][1:-1]
if predicate(symbol):
for char in symbol:
if char in local_dict or char in global_dict:
# Get rid of the call to Symbol
del result[-2:]
result.extend([(NAME, "%s" % char),
(NAME, 'Symbol'), (OP, '(')])
else:
result.extend([(NAME, "'%s'" % char), (OP, ')'),
(NAME, 'Symbol'), (OP, '(')])
# Delete the last two tokens: get rid of the extraneous
# Symbol( we just added
# Also, set split_previous=True so will skip
# the closing parenthesis of the original Symbol
del result[-2:]
split = False
split_previous = True
continue
else:
split = False
result.append(tok)
return result
return _split_symbols
#: Splits symbol names for implicit multiplication.
#:
#: Intended to let expressions like ``xyz`` be parsed as ``x*y*z``. Does not
#: split Greek character names, so ``theta`` will *not* become
#: ``t*h*e*t*a``. Generally this should be used with
#: ``implicit_multiplication``.
split_symbols = split_symbols_custom(_token_splittable)
def implicit_multiplication(result, local_dict, global_dict):
"""Makes the multiplication operator optional in most cases.
Use this before :func:`implicit_application`, otherwise expressions like
``sin 2x`` will be parsed as ``x * sin(2)`` rather than ``sin(2*x)``.
Examples
========
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, implicit_multiplication)
>>> transformations = standard_transformations + (implicit_multiplication,)
>>> parse_expr('3 x y', transformations=transformations)
3*x*y
"""
# These are interdependent steps, so we don't expose them separately
for step in (_group_parentheses(implicit_multiplication),
_apply_functions,
_implicit_multiplication):
result = step(result, local_dict, global_dict)
result = _flatten(result)
return result
def implicit_application(result, local_dict, global_dict):
"""Makes parentheses optional in some cases for function calls.
Use this after :func:`implicit_multiplication`, otherwise expressions
like ``sin 2x`` will be parsed as ``x * sin(2)`` rather than
``sin(2*x)``.
Examples
========
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, implicit_application)
>>> transformations = standard_transformations + (implicit_application,)
>>> parse_expr('cot z + csc z', transformations=transformations)
cot(z) + csc(z)
"""
for step in (_group_parentheses(implicit_application),
_apply_functions,
_implicit_application,):
result = step(result, local_dict, global_dict)
result = _flatten(result)
return result
def implicit_multiplication_application(result, local_dict, global_dict):
"""Allows a slightly relaxed syntax.
- Parentheses for single-argument method calls are optional.
- Multiplication is implicit.
- Symbol names can be split (i.e. spaces are not needed between
symbols).
- Functions can be exponentiated.
Examples
========
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, implicit_multiplication_application)
>>> parse_expr("10sin**2 x**2 + 3xyz + tan theta",
... transformations=(standard_transformations +
... (implicit_multiplication_application,)))
3*x*y*z + 10*sin(x**2)**2 + tan(theta)
"""
for step in (split_symbols, implicit_multiplication,
implicit_application, function_exponentiation):
result = step(result, local_dict, global_dict)
return result
def auto_symbol(tokens, local_dict, global_dict):
"""Inserts calls to ``Symbol`` for undefined variables."""
result = []
prevTok = (None, None)
tokens.append((None, None)) # so zip traverses all tokens
for tok, nextTok in zip(tokens, tokens[1:]):
tokNum, tokVal = tok
nextTokNum, nextTokVal = nextTok
if tokNum == NAME:
name = tokVal
if (name in ['True', 'False', 'None']
or iskeyword(name)
or name in local_dict
# Don't convert attribute access
or (prevTok[0] == OP and prevTok[1] == '.')
# Don't convert keyword arguments
or (prevTok[0] == OP and prevTok[1] in ('(', ',')
and nextTokNum == OP and nextTokVal == '=')):
result.append((NAME, name))
continue
elif name in global_dict:
obj = global_dict[name]
if isinstance(obj, (Basic, type)) or callable(obj):
result.append((NAME, name))
continue
result.extend([
(NAME, 'Symbol'),
(OP, '('),
(NAME, repr(str(name))),
(OP, ')'),
])
else:
result.append((tokNum, tokVal))
prevTok = (tokNum, tokVal)
return result
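# Illustrative effect (a sketch): in 'x + y', the undefined NAMEs 'x' and 'y'
# are rewritten into Symbol('x') and Symbol('y') calls, while names bound in
# local_dict/global_dict, Python keywords, attribute access and keyword
# arguments are left untouched.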
def lambda_notation(tokens, local_dict, global_dict):
"""Substitutes "lambda" with its Sympy equivalent Lambda().
However, the conversion doesn't take place if only "lambda"
is passed because that is a syntax error.
"""
result = []
flag = False
toknum, tokval = tokens[0]
tokLen = len(tokens)
if toknum == NAME and tokval == 'lambda':
if tokLen == 2:
result.extend(tokens)
elif tokLen > 2:
result.extend([
(NAME, 'Lambda'),
(OP, '('),
(OP, '('),
(OP, ')'),
(OP, ')'),
])
for tokNum, tokVal in tokens[1:]:
if tokNum == OP and tokVal == ':':
tokVal = ','
flag = True
if not flag and tokNum == OP and tokVal in ['*', '**']:
raise TokenError("Starred arguments in lambda not supported")
if flag:
result.insert(-1, (tokNum, tokVal))
else:
result.insert(-2, (tokNum, tokVal))
else:
result.extend(tokens)
return result
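# Illustrative effect (lambda_notation is part of standard_transformations):
# >>> parse_expr('lambda x: x + 1')
# Lambda(x, x + 1)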
def factorial_notation(tokens, local_dict, global_dict):
"""Allows standard notation for factorial."""
result = []
prevtoken = ''
for toknum, tokval in tokens:
if toknum == OP:
op = tokval
if op == '!!':
if prevtoken == '!' or prevtoken == '!!':
raise TokenError
result = _add_factorial_tokens('factorial2', result)
elif op == '!':
if prevtoken == '!' or prevtoken == '!!':
raise TokenError
result = _add_factorial_tokens('factorial', result)
else:
result.append((OP, op))
else:
result.append((toknum, tokval))
prevtoken = tokval
return result
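# Illustrative effect (factorial_notation is part of standard_transformations):
# >>> parse_expr('5!')
# 120
# >>> parse_expr('4!!')
# 8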
def convert_xor(tokens, local_dict, global_dict):
"""Treats XOR, ``^``, as exponentiation, ``**``."""
result = []
for toknum, tokval in tokens:
if toknum == OP:
if tokval == '^':
result.append((OP, '**'))
else:
result.append((toknum, tokval))
else:
result.append((toknum, tokval))
return result
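# Illustrative usage (convert_xor is not in standard_transformations, so it
# must be added explicitly):
# >>> parse_expr('2^3', transformations=standard_transformations + (convert_xor,))
# 8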
def auto_number(tokens, local_dict, global_dict):
"""Converts numeric literals to use SymPy equivalents.
Complex numbers use ``I``; integer literals use ``Integer``, float
literals use ``Float``, and repeating decimals use ``Rational``.
"""
result = []
prevtoken = ''
for toknum, tokval in tokens:
if toknum == NUMBER:
number = tokval
postfix = []
if number.endswith('j') or number.endswith('J'):
number = number[:-1]
postfix = [(OP, '*'), (NAME, 'I')]
if '.' in number or (('e' in number or 'E' in number) and
not (number.startswith('0x') or number.startswith('0X'))):
match = _re_repeated.match(number)
if match is not None:
# Clear repeating decimals, e.g. 3.4[31] -> (3 + 4/10 + 31/990)
pre, post, repetend = match.groups()
zeros = '0'*len(post)
post, repetends = [w.lstrip('0') for w in [post, repetend]]
# or else interpreted as octal
a = pre or '0'
b, c = post or '0', '1' + zeros
d, e = repetends, ('9'*len(repetend)) + zeros
seq = [
(OP, '('),
(NAME,
'Integer'), (OP, '('), (NUMBER, a), (OP, ')'),
(OP, '+'),
(NAME, 'Rational'), (OP, '('), (
NUMBER, b), (OP, ','), (NUMBER, c), (OP, ')'),
(OP, '+'),
(NAME, 'Rational'), (OP, '('), (
NUMBER, d), (OP, ','), (NUMBER, e), (OP, ')'),
(OP, ')'),
]
else:
seq = [(NAME, 'Float'), (OP, '('),
(NUMBER, repr(str(number))), (OP, ')')]
else:
seq = [(NAME, 'Integer'), (OP, '('), (
NUMBER, number), (OP, ')')]
result.extend(seq + postfix)
else:
result.append((toknum, tokval))
return result
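# Illustrative effect (auto_number is part of standard_transformations; the
# bracket notation marks a repeating decimal):
# >>> parse_expr('0.[3]')
# 1/3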
def rationalize(tokens, local_dict, global_dict):
"""Converts floats into ``Rational``. Run AFTER ``auto_number``."""
result = []
passed_float = False
for toknum, tokval in tokens:
if toknum == NAME:
if tokval == 'Float':
passed_float = True
tokval = 'Rational'
result.append((toknum, tokval))
        elif passed_float and toknum == NUMBER:
passed_float = False
result.append((STRING, tokval))
else:
result.append((toknum, tokval))
return result
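# Illustrative usage (rationalize must run AFTER auto_number, e.g. appended
# to standard_transformations):
# >>> parse_expr('0.5', transformations=standard_transformations + (rationalize,))
# 1/2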
def _transform_equals_sign(tokens, local_dict, global_dict):
"""Transforms the equals sign ``=`` to instances of Eq.
This is a helper function for `convert_equals_signs`.
Works with expressions containing one equals sign and no
nesting. Expressions like `(1=2)=False` won't work with this
and should be used with `convert_equals_signs`.
Examples: 1=2 to Eq(1,2)
1*2=x to Eq(1*2, x)
This does not deal with function arguments yet.
"""
result = []
if (OP, "=") in tokens:
result.append((NAME, "Eq"))
result.append((OP, "("))
for index, token in enumerate(tokens):
if token == (OP, "="):
result.append((OP, ","))
continue
result.append(token)
result.append((OP, ")"))
else:
result = tokens
return result
def convert_equals_signs(result, local_dict, global_dict):
""" Transforms all the equals signs ``=`` to instances of Eq.
Parses the equals signs in the expression and replaces them with
appropriate Eq instances.Also works with nested equals signs.
Does not yet play well with function arguments.
For example, the expression `(x=y)` is ambiguous and can be interpreted
as x being an argument to a function and `convert_equals_signs` won't
work for this.
See also
========
convert_equality_operators
Examples:
=========
>>> from sympy.parsing.sympy_parser import (parse_expr,
... standard_transformations, convert_equals_signs)
>>> parse_expr("1*2=x", transformations=(
... standard_transformations + (convert_equals_signs,)))
Eq(2, x)
>>> parse_expr("(1*2=x)=False", transformations=(
... standard_transformations + (convert_equals_signs,)))
Eq(Eq(2, x), False)
"""
for step in (_group_parentheses(convert_equals_signs),
_apply_functions,
_transform_equals_sign):
result = step(result, local_dict, global_dict)
result = _flatten(result)
return result
#: Standard transformations for :func:`parse_expr`.
#: Inserts calls to :class:`Symbol`, :class:`Integer`, and other SymPy
#: datatypes and allows the use of standard factorial notation (e.g. ``x!``).
standard_transformations = (lambda_notation, auto_symbol, auto_number, factorial_notation)
def stringify_expr(s, local_dict, global_dict, transformations):
"""
Converts the string ``s`` to Python code, in ``local_dict``
Generally, ``parse_expr`` should be used.
"""
tokens = []
input_code = StringIO(s.strip())
for toknum, tokval, _, _, _ in generate_tokens(input_code.readline):
tokens.append((toknum, tokval))
for transform in transformations:
tokens = transform(tokens, local_dict, global_dict)
return untokenize(tokens)
def eval_expr(code, local_dict, global_dict):
"""
Evaluate Python code generated by ``stringify_expr``.
Generally, ``parse_expr`` should be used.
"""
expr = eval(
code, global_dict, local_dict) # take local objects in preference
return expr
def parse_expr(s, local_dict=None, transformations=standard_transformations,
global_dict=None, evaluate=True):
"""Converts the string ``s`` to a SymPy expression, in ``local_dict``
Parameters
==========
s : str
The string to parse.
local_dict : dict, optional
A dictionary of local variables to use when parsing.
global_dict : dict, optional
A dictionary of global variables. By default, this is initialized
with ``from sympy import *``; provide this parameter to override
this behavior (for instance, to parse ``"Q & S"``).
transformations : tuple, optional
A tuple of transformation functions used to modify the tokens of the
parsed expression before evaluation. The default transformations
convert numeric literals into their SymPy equivalents, convert
undefined variables into SymPy symbols, and allow the use of standard
mathematical factorial notation (e.g. ``x!``).
evaluate : bool, optional
When False, the order of the arguments will remain as they were in the
string and automatic simplification that would normally occur is
suppressed. (see examples)
Examples
========
>>> from sympy.parsing.sympy_parser import parse_expr
>>> parse_expr("1/2")
1/2
>>> type(_)
<class 'sympy.core.numbers.Half'>
>>> from sympy.parsing.sympy_parser import standard_transformations,\\
... implicit_multiplication_application
>>> transformations = (standard_transformations +
... (implicit_multiplication_application,))
>>> parse_expr("2x", transformations=transformations)
2*x
When evaluate=False, some automatic simplifications will not occur:
>>> parse_expr("2**3"), parse_expr("2**3", evaluate=False)
(8, 2**3)
In addition the order of the arguments will not be made canonical.
This feature allows one to tell exactly how the expression was entered:
>>> a = parse_expr('1 + x', evaluate=False)
>>> b = parse_expr('x + 1', evaluate=0)
>>> a == b
False
>>> a.args
(1, x)
>>> b.args
(x, 1)
See Also
========
stringify_expr, eval_expr, standard_transformations,
implicit_multiplication_application
"""
if local_dict is None:
local_dict = {}
if global_dict is None:
global_dict = {}
exec_('from sympy import *', global_dict)
code = stringify_expr(s, local_dict, global_dict, transformations)
if not evaluate:
code = compile(evaluateFalse(code), '<string>', 'eval')
return eval_expr(code, local_dict, global_dict)
def evaluateFalse(s):
"""
Replaces operators with the SymPy equivalent and sets evaluate=False.
"""
node = ast.parse(s)
node = EvaluateFalseTransformer().visit(node)
# node is a Module, we want an Expression
node = ast.Expression(node.body[0].value)
return ast.fix_missing_locations(node)
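# Illustrative effect (a sketch): parse_expr calls evaluateFalse when
# evaluate=False, so '2**3' is compiled to Pow(2, 3, evaluate=False) and
# prints as 2**3 instead of being folded to 8.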
class EvaluateFalseTransformer(ast.NodeTransformer):
operators = {
ast.Add: 'Add',
ast.Mult: 'Mul',
ast.Pow: 'Pow',
ast.Sub: 'Add',
ast.Div: 'Mul',
ast.BitOr: 'Or',
ast.BitAnd: 'And',
ast.BitXor: 'Not',
}
def flatten(self, args, func):
result = []
for arg in args:
if isinstance(arg, ast.Call) and arg.func.id == func:
result.extend(self.flatten(arg.args, func))
else:
result.append(arg)
return result
def visit_BinOp(self, node):
if node.op.__class__ in self.operators:
sympy_class = self.operators[node.op.__class__]
right = self.visit(node.right)
left = self.visit(node.left)
            if (isinstance(node.left, ast.UnaryOp) and
                    not isinstance(node.right, ast.UnaryOp) and
                    sympy_class in ('Mul',)):
left, right = right, left
if isinstance(node.op, ast.Sub):
right = ast.UnaryOp(op=ast.USub(), operand=right)
if isinstance(node.op, ast.Div):
if isinstance(node.left, ast.UnaryOp):
if isinstance(node.right,ast.UnaryOp):
left, right = right, left
left = ast.Call(
func=ast.Name(id='Pow', ctx=ast.Load()),
args=[left, ast.UnaryOp(op=ast.USub(), operand=ast.Num(1))],
keywords=[ast.keyword(arg='evaluate', value=ast.Name(id='False', ctx=ast.Load()))],
starargs=None,
kwargs=None
)
else:
right = ast.Call(
func=ast.Name(id='Pow', ctx=ast.Load()),
args=[right, ast.UnaryOp(op=ast.USub(), operand=ast.Num(1))],
keywords=[ast.keyword(arg='evaluate', value=ast.Name(id='False', ctx=ast.Load()))],
starargs=None,
kwargs=None
)
new_node = ast.Call(
func=ast.Name(id=sympy_class, ctx=ast.Load()),
args=[left, right],
keywords=[ast.keyword(arg='evaluate', value=ast.Name(id='False', ctx=ast.Load()))],
starargs=None,
kwargs=None
)
if sympy_class in ('Add', 'Mul'):
# Denest Add or Mul as appropriate
new_node.args = self.flatten(new_node.args, sympy_class)
return new_node
return node
| gpl-3.0 | 2,117,810,750,432,121,900 | 32.929012 | 125 | 0.543558 | false |
xuzhao1211/OnlineExam | misago/forums/models.py | 8 | 6732 | from urlparse import urlparse
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
from mptt.managers import TreeManager
from mptt.models import MPTTModel, TreeForeignKey
from misago.acl import version as acl_version
from misago.acl.models import BaseRole
from misago.conf import settings
from misago.core.cache import cache
from misago.core.utils import slugify
from misago.threads import threadtypes
CACHE_NAME = 'misago_forums_tree'
FORUMS_TREE_ID = 1
class ForumManager(TreeManager):
def private_threads(self):
return self.get_special('private_threads')
def root_category(self):
return self.get_special('root_category')
def get_special(self, special_role):
cache_name = '%s_%s' % (CACHE_NAME, special_role)
special_forum = cache.get(cache_name, 'nada')
if special_forum == 'nada':
special_forum = self.get(special_role=special_role)
cache.set(cache_name, special_forum)
return special_forum
def all_forums(self, include_root=False):
qs = self.filter(tree_id=FORUMS_TREE_ID)
if not include_root:
qs = self.filter(lft__gt=3)
return qs.order_by('lft')
def get_cached_forums_dict(self):
forums_dict = cache.get(CACHE_NAME, 'nada')
if forums_dict == 'nada':
forums_dict = self.get_forums_dict_from_db()
cache.set(CACHE_NAME, forums_dict)
return forums_dict
def get_forums_dict_from_db(self):
forums_dict = {}
for forum in self.all_forums(include_root=True):
forums_dict[forum.pk] = forum
return forums_dict
def clear_cache(self):
cache.delete(CACHE_NAME)
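# Illustrative usage of the manager (a sketch):
# forums = Forum.objects.all_forums()        # every forum in tree order
# private = Forum.objects.private_threads()  # cached special-role forum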
class Forum(MPTTModel):
parent = TreeForeignKey(
'self', null=True, blank=True, related_name='children')
special_role = models.CharField(max_length=255, null=True, blank=True)
role = models.CharField(max_length=255, null=True, blank=True)
name = models.CharField(max_length=255)
slug = models.CharField(max_length=255)
description = models.TextField(null=True, blank=True)
is_closed = models.BooleanField(default=False)
redirect_url = models.CharField(max_length=255, null=True, blank=True)
redirects = models.PositiveIntegerField(default=0)
threads = models.PositiveIntegerField(default=0)
posts = models.PositiveIntegerField(default=0)
last_post_on = models.DateTimeField(null=True, blank=True)
last_thread = models.ForeignKey('misago_threads.Thread', related_name='+',
null=True, blank=True,
on_delete=models.SET_NULL)
last_thread_title = models.CharField(max_length=255, null=True, blank=True)
last_thread_slug = models.CharField(max_length=255, null=True, blank=True)
last_poster = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='+',
null=True, blank=True,
on_delete=models.SET_NULL)
last_poster_name = models.CharField(max_length=255, null=True, blank=True)
last_poster_slug = models.CharField(max_length=255, null=True, blank=True)
prune_started_after = models.PositiveIntegerField(default=0)
prune_replied_after = models.PositiveIntegerField(default=0)
archive_pruned_in = models.ForeignKey('self',
related_name='pruned_archive',
null=True, blank=True,
on_delete=models.SET_NULL)
css_class = models.CharField(max_length=255, null=True, blank=True)
objects = ForumManager()
@property
def thread_type(self):
return threadtypes.get(self.special_role or self.role)
def __unicode__(self):
return unicode(self.thread_type.get_forum_name(self))
def lock(self):
return Forum.objects.select_for_update().get(id=self.id)
def delete(self, *args, **kwargs):
Forum.objects.clear_cache()
acl_version.invalidate()
return super(Forum, self).delete(*args, **kwargs)
def synchronize(self):
self.threads = self.thread_set.filter(is_moderated=False).count()
if self.threads:
replies_sum = self.thread_set.aggregate(models.Sum('replies'))
self.posts = self.threads + replies_sum['replies__sum']
else:
self.posts = 0
if self.threads:
last_thread_qs = self.thread_set.filter(is_moderated=False)
last_thread = last_thread_qs.order_by('-last_post_on')[:1][0]
self.set_last_thread(last_thread)
else:
self.empty_last_thread()
def delete_content(self):
from misago.forums.signals import delete_forum_content
delete_forum_content.send(sender=self)
def move_content(self, new_forum):
from misago.forums.signals import move_forum_content
move_forum_content.send(sender=self, new_forum=new_forum)
@property
def is_category(self):
return self.role == 'category'
@property
def is_forum(self):
return self.role == 'forum'
@property
def is_redirect(self):
return self.role == 'redirect'
@property
def redirect_host(self):
return urlparse(self.redirect_url).hostname
def get_absolute_url(self):
return self.thread_type.get_forum_absolute_url(self)
def get_new_thread_url(self):
return self.thread_type.get_new_thread_url(self)
def set_name(self, name):
self.name = name
self.slug = slugify(name)
def set_last_thread(self, thread):
self.last_post_on = thread.last_post_on
self.last_thread = thread
self.last_thread_title = thread.title
self.last_thread_slug = thread.slug
self.last_poster = thread.last_poster
self.last_poster_name = thread.last_poster_name
self.last_poster_slug = thread.last_poster_slug
def empty_last_thread(self):
self.last_post_on = None
self.last_thread = None
self.last_thread_title = None
self.last_thread_slug = None
self.last_poster = None
self.last_poster_name = None
self.last_poster_slug = None
def has_child(self, child):
return child.lft > self.lft and child.rght < self.rght
class ForumRole(BaseRole):
pass
class RoleForumACL(models.Model):
role = models.ForeignKey('misago_acl.Role', related_name='forums_acls')
forum = models.ForeignKey('Forum', related_name='forum_role_set')
forum_role = models.ForeignKey(ForumRole)
| gpl-2.0 | -3,858,689,910,301,039,600 | 34.246073 | 79 | 0.637849 | false |
systers/mailman | src/mailman/handlers/mime_delete.py | 7 | 11156 | # Copyright (C) 2002-2015 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""MIME-stripping filter for Mailman.
This module scans a message for MIME content, removing those sections whose
MIME types match one of a list of matches. multipart/alternative sections are
replaced by the first non-empty component, and multipart/mixed sections
wrapping only single sections after other processing are replaced by their
contents.
"""
__all__ = [
'MIMEDelete',
]
import os
import shutil
import logging
import tempfile
import subprocess
from contextlib import ExitStack
from email.iterators import typed_subpart_iterator
from email.mime.message import MIMEMessage
from email.mime.text import MIMEText
from itertools import count
from lazr.config import as_boolean
from mailman.config import config
from mailman.core import errors
from mailman.core.i18n import _
from mailman.email.message import OwnerNotification
from mailman.interfaces.action import FilterAction
from mailman.interfaces.handler import IHandler
from mailman.utilities.string import oneline
from mailman.version import VERSION
from string import Template
from zope.interface import implementer
log = logging.getLogger('mailman.error')
def dispose(mlist, msg, msgdata, why):
if mlist.filter_action is FilterAction.reject:
# Bounce the message to the original author.
raise errors.RejectMessage(why)
elif mlist.filter_action is FilterAction.forward:
# Forward it on to the list moderators.
text=_("""\
The attached message matched the $mlist.display_name mailing list's content
filtering rules and was prevented from being forwarded on to the list
membership. You are receiving the only remaining copy of the discarded
message.
""")
        subject = _('Content filter message notification')
notice = OwnerNotification(mlist, subject, roster=mlist.moderators)
notice.set_type('multipart/mixed')
notice.attach(MIMEText(text))
notice.attach(MIMEMessage(msg))
notice.send(mlist)
# Let this fall through so the original message gets discarded.
elif mlist.filter_action is FilterAction.preserve:
if as_boolean(config.mailman.filtered_messages_are_preservable):
# This is just like discarding the message except that a copy is
# placed in the 'bad' queue should the site administrator want to
# inspect the message.
filebase = config.switchboards['bad'].enqueue(msg, msgdata)
log.info('{0} preserved in file base {1}'.format(
msg.get('message-id', 'n/a'), filebase))
else:
        log.error(
            '{0}: invalid FilterAction: {1}. Treating as discard'.format(
                mlist.fqdn_listname, mlist.filter_action.name))
# Most cases also discard the message
raise errors.DiscardMessage(why)
def process(mlist, msg, msgdata):
# We also don't care about our own digests or plaintext
ctype = msg.get_content_type()
mtype = msg.get_content_maintype()
# Check to see if the outer type matches one of the filter types
filtertypes = set(mlist.filter_types)
passtypes = set(mlist.pass_types)
if ctype in filtertypes or mtype in filtertypes:
dispose(mlist, msg, msgdata,
_("The message's content type was explicitly disallowed"))
# Check to see if there is a pass types and the outer type doesn't match
# one of these types
if passtypes and not (ctype in passtypes or mtype in passtypes):
dispose(mlist, msg, msgdata,
_("The message's content type was not explicitly allowed"))
# Filter by file extensions
filterexts = set(mlist.filter_extensions)
passexts = set(mlist.pass_extensions)
fext = get_file_ext(msg)
if fext:
if fext in filterexts:
dispose(mlist, msg, msgdata,
_("The message's file extension was explicitly disallowed"))
if passexts and not (fext in passexts):
dispose(mlist, msg, msgdata,
_("The message's file extension was not explicitly allowed"))
numparts = len([subpart for subpart in msg.walk()])
# If the message is a multipart, filter out matching subparts
if msg.is_multipart():
# Recursively filter out any subparts that match the filter list
prelen = len(msg.get_payload())
filter_parts(msg, filtertypes, passtypes, filterexts, passexts)
# If the outer message is now an empty multipart (and it wasn't
# before!) then, again it gets discarded.
postlen = len(msg.get_payload())
if postlen == 0 and prelen > 0:
dispose(mlist, msg, msgdata,
_("After content filtering, the message was empty"))
# Now replace all multipart/alternatives with just the first non-empty
# alternative. BAW: We have to special case when the outer part is a
# multipart/alternative because we need to retain most of the outer part's
# headers. For now we'll move the subpart's payload into the outer part,
# and then copy over its Content-Type: and Content-Transfer-Encoding:
# headers (any others?).
if mlist.collapse_alternatives:
collapse_multipart_alternatives(msg)
if ctype == 'multipart/alternative':
firstalt = msg.get_payload(0)
reset_payload(msg, firstalt)
# If we removed some parts, make note of this
changedp = 0
if numparts != len([subpart for subpart in msg.walk()]):
changedp = 1
# Now perhaps convert all text/html to text/plain.
if mlist.convert_html_to_plaintext:
changedp += to_plaintext(msg)
# If we're left with only two parts, an empty body and one attachment,
# recast the message to one of just that part
if msg.is_multipart() and len(msg.get_payload()) == 2:
if msg.get_payload(0).get_payload() == '':
useful = msg.get_payload(1)
reset_payload(msg, useful)
changedp = 1
if changedp:
msg['X-Content-Filtered-By'] = 'Mailman/MimeDel {0}'.format(VERSION)
def reset_payload(msg, subpart):
# Reset payload of msg to contents of subpart, and fix up content headers
payload = subpart.get_payload()
msg.set_payload(payload)
del msg['content-type']
del msg['content-transfer-encoding']
del msg['content-disposition']
del msg['content-description']
msg['Content-Type'] = subpart.get('content-type', 'text/plain')
cte = subpart.get('content-transfer-encoding')
if cte:
msg['Content-Transfer-Encoding'] = cte
cdisp = subpart.get('content-disposition')
if cdisp:
msg['Content-Disposition'] = cdisp
cdesc = subpart.get('content-description')
if cdesc:
msg['Content-Description'] = cdesc
def filter_parts(msg, filtertypes, passtypes, filterexts, passexts):
# Look at all the message's subparts, and recursively filter
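    # Returns False when every subpart was filtered away, so the caller can
    # discard this now-empty part as well.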
if not msg.is_multipart():
return True
payload = msg.get_payload()
prelen = len(payload)
newpayload = []
for subpart in payload:
keep = filter_parts(subpart, filtertypes, passtypes,
filterexts, passexts)
if not keep:
continue
ctype = subpart.get_content_type()
mtype = subpart.get_content_maintype()
if ctype in filtertypes or mtype in filtertypes:
# Throw this subpart away
continue
if passtypes and not (ctype in passtypes or mtype in passtypes):
# Throw this subpart away
continue
# check file extension
fext = get_file_ext(subpart)
if fext:
if fext in filterexts:
continue
if passexts and not (fext in passexts):
continue
newpayload.append(subpart)
# Check to see if we discarded all the subparts
postlen = len(newpayload)
msg.set_payload(newpayload)
if postlen == 0 and prelen > 0:
# We threw away everything
return False
return True
def collapse_multipart_alternatives(msg):
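    # Keep only the first alternative of each multipart/alternative subpart;
    # an alternative with no payload at all is dropped entirely.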
if not msg.is_multipart():
return
newpayload = []
for subpart in msg.get_payload():
if subpart.get_content_type() == 'multipart/alternative':
try:
firstalt = subpart.get_payload(0)
newpayload.append(firstalt)
except IndexError:
pass
else:
newpayload.append(subpart)
msg.set_payload(newpayload)
def to_plaintext(msg):
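    # Each text/html subpart is written to a temp file and run through the
    # site-configured converter (config.mailman.html_to_plain_text_command,
    # typically something like 'lynx -dump $filename' -- the exact command
    # is site policy, not fixed here).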
changedp = 0
counter = count()
with ExitStack() as resources:
tempdir = tempfile.mkdtemp()
resources.callback(shutil.rmtree, tempdir)
for subpart in typed_subpart_iterator(msg, 'text', 'html'):
filename = os.path.join(tempdir, '{}.html'.format(next(counter)))
with open(filename, 'w', encoding='utf-8') as fp:
fp.write(subpart.get_payload())
template = Template(config.mailman.html_to_plain_text_command)
command = template.safe_substitute(filename=filename).split()
try:
stdout = subprocess.check_output(
command, universal_newlines=True)
except subprocess.CalledProcessError:
log.exception('HTML -> text/plain command error')
else:
# Replace the payload of the subpart with the converted text
# and tweak the content type.
del subpart['content-transfer-encoding']
subpart.set_payload(stdout)
subpart.set_type('text/plain')
changedp += 1
return changedp
def get_file_ext(m):
"""
Get filename extension. Caution: some virus don't put filename
in 'Content-Disposition' header.
"""
fext = ''
filename = m.get_filename('') or m.get_param('name', '')
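    # e.g. 'report.pdf' -> splitext gives '.pdf' -> returned as 'pdf';
    # names without a real extension come back as ''.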
if filename:
        fext = os.path.splitext(oneline(filename, 'utf-8'))[1]
if len(fext) > 1:
fext = fext[1:]
else:
fext = ''
return fext
@implementer(IHandler)
class MIMEDelete:
"""Filter the MIME content of messages."""
name = 'mime-delete'
description = _('Filter the MIME content of messages.')
def process(self, mlist, msg, msgdata):
# Short-circuits
if not mlist.filter_content:
return
if msgdata.get('isdigest'):
return
process(mlist, msg, msgdata)
| gpl-3.0 | -6,272,591,452,590,293,000 | 35.338762 | 78 | 0.653101 | false |
germanovm/vdsm | tests/functional/virtTests.py | 3 | 20332 | #
# Copyright 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import os
import math
import tempfile
import logging
import platform
from stat import S_IROTH
from functools import partial, wraps
from nose.plugins.skip import SkipTest
from testlib import VdsmTestCase as TestCaseBase
from testlib import permutations, expandPermutations
from testlib import temporaryPath
import verify
import caps
from vdsm.utils import CommandPath, RollbackContext
import storageTests as storage
from storage.misc import execCmd
from utils import VdsProxy, SUCCESS
from virt import vmstatus
_mkinitrd = CommandPath("mkinitrd",
"/usr/bin/mkinitrd", # Fedora
"/sbin/mkinitrd") # RHEL 6.x, Centos 6.x
_modprobe = CommandPath("modprobe",
"/usr/sbin/modprobe", # Fedora, Ubuntu
"/sbin/modprobe") # RHEL6
_kernelVer = os.uname()[2]
_kernelPath = "/boot/vmlinuz-" + _kernelVer
_initramfsPath = None
_initramfsPaths = ["/boot/initramfs-%s.img" % _kernelVer, # Fedora, RHEL
"/boot/initrd.img-" + _kernelVer, # Ubuntu
]
_tmpinitramfs = False
VM_MINIMAL_UPTIME = 30
_GRAPHICS_FOR_ARCH = {caps.Architecture.PPC64LE: 'vnc',
caps.Architecture.X86_64: 'qxl'}
class VDSMConnectionError(Exception):
pass
def setUpModule():
    # global used in order to keep the initramfs image persistent across
    # different VM tests
global _initramfsPath
global _tmpinitramfs
_initramfsPath = _detectBootImages(_initramfsPaths)
if _initramfsPath is None:
_initramfsPath = _genInitramfs()
_tmpinitramfs = True
def tearDownModule():
if _tmpinitramfs:
os.unlink(_initramfsPath)
def _detectBootImages(initramfsPaths):
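    # Return the first world-readable initramfs from initramfsPaths (or None
    # so the caller generates a temporary one); raise SkipTest when the
    # kernel image is missing or not readable by the qemu process.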
if not os.path.isfile(_kernelPath):
raise SkipTest("Can not locate kernel image for release %s" %
_kernelVer)
if not (os.stat(_kernelPath).st_mode & S_IROTH):
raise SkipTest("qemu process can not read the file "
"%s" % _kernelPath)
initramfsPaths = filter(os.path.isfile, initramfsPaths)
if len(initramfsPaths) > 0:
if (os.stat(initramfsPaths[0]).st_mode & S_IROTH):
return initramfsPaths[0]
return None
def _genInitramfs():
logging.warning('Generating a temporary initramfs image')
    fd, path = tempfile.mkstemp()
    os.close(fd)  # mkinitrd writes to the path itself; don't leak our fd
cmd = [_mkinitrd.cmd, "-f", path, _kernelVer]
rc, out, err = execCmd(cmd)
os.chmod(path, 0o644)
return path
def requireKVM(method):
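    # Decorator: run the wrapped test only when getVdsCapabilities reports
    # kvmEnabled; otherwise raise SkipTest.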
@wraps(method)
def wrapped(self, *args, **kwargs):
status, msg, result = self.vdsm.getVdsCapabilities()
self.assertEqual(status, SUCCESS, msg)
if result['kvmEnabled']:
return method(self, *args, **kwargs)
else:
raise SkipTest('KVM is not enabled')
return wrapped
class RunningVm(object):
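    """Context manager that boots a throwaway VM via direct kernel boot and
    destroys it on exit.  The kernel arguments (rd.break=cmdline rd.shell)
    drop the guest into an initramfs shell, so no root filesystem is needed.
    """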
KERNEL_ARGS_DISTRO = {
'fedora': 'rd.break=cmdline rd.shell rd.skipfsck',
'rhel': 'rd.break=cmdline rd.shell rd.skipfsck'}
def __init__(self, vdsm, vmDef, distro='fedora',
kernelPath=None, initramfsPath=None):
if kernelPath is None:
kernelPath = _kernelPath
if initramfsPath is None:
initramfsPath = _initramfsPath
if distro.lower() not in self.KERNEL_ARGS_DISTRO:
raise SkipTest("Don't know how to perform direct kernel boot for "
"%s" % distro)
self._template = {'vmId': '11111111-abcd-2222-ffff-333333333333',
'vmName': 'vdsmKernelBootVM',
'kvmEnable': 'true',
'memSize': '256',
'vmType': 'kvm',
'kernelArgs': self.KERNEL_ARGS_DISTRO[distro],
'kernel': kernelPath,
'initrd': initramfsPath}
self._template.update(vmDef)
self._vdsm = vdsm
def start(self):
self._id = self._template['vmId']
self._vdsm.create(self._template)
return self._id
def stop(self):
status, msg = self._vdsm.destroy(self._id)
if status != SUCCESS:
raise VDSMConnectionError(msg)
else:
return SUCCESS
def __enter__(self):
return self.start()
def __exit__(self, type, value, traceback):
self.stop()
@expandPermutations
class VirtTestBase(TestCaseBase, verify.DeviceMixin):
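    """Assertion helpers that poll VM status through VdsProxy until the
    guest boots, reaches the requested uptime, or shuts down.
    """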
UPSTATES = frozenset((vmstatus.UP, vmstatus.POWERING_UP))
def setUp(self):
self.vdsm = VdsProxy()
def _getVmStatus(self, vmid):
status, msg, result = self.vdsm.getVmStats(vmid)
self.assertEqual(status, SUCCESS, msg)
return result
def assertQemuSetupComplete(self, vmid):
result = self._getVmStatus(vmid)
self.assertTrue(result['status'] != vmstatus.WAIT_FOR_LAUNCH,
                        'VM has not completed qemu setup!')
def assertVmBooting(self, vmid):
result = self._getVmStatus(vmid)
self.assertTrue(result['status'] != vmstatus.DOWN,
'VM is not booting!')
def assertVmUp(self, vmid):
result = self._getVmStatus(vmid)
self.assertIn(result['status'], self.UPSTATES)
def assertVmDown(self, vmid):
result = self._getVmStatus(vmid)
self.assertEqual(result['status'], vmstatus.DOWN)
def assertGuestUp(self, vmid, targetUptime=0):
result = self._getVmStatus(vmid)
if targetUptime > 0:
self.assertTrue(int(result['elapsedTime']) >= targetUptime)
else:
            self.assertEqual(result['status'], vmstatus.UP)
def _waitForBoot(self, vmid):
self.retryAssert(partial(self.assertQemuSetupComplete, vmid),
timeout=10)
self.retryAssert(partial(self.assertVmBooting, vmid),
timeout=3)
self.retryAssert(partial(self.assertVmUp, vmid),
timeout=10)
def _waitForStartup(self, vmid, targetUptime=0):
self._waitForBoot(vmid)
        # allow 20% extra time on top of the target uptime before timing out
self.retryAssert(partial(self.assertGuestUp, vmid, targetUptime),
timeout=math.ceil(targetUptime * 1.2))
def _waitForShutdown(self, vmid):
self.retryAssert(partial(self.assertVmDown, vmid),
timeout=10)
def _verifyDevices(self, vmId):
status, msg, stats = self.vdsm.getVmList(vmId)
self.assertEqual(status, SUCCESS, msg)
self.verifyDevicesConf(conf=stats['devices'])
@expandPermutations
class VirtTest(VirtTestBase):
@requireKVM
def testSimpleVm(self):
customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
'vmName': 'testSimpleVm',
'display': _GRAPHICS_FOR_ARCH[platform.machine()]}
with RunningVm(self.vdsm, customization) as vm:
self._waitForStartup(vm, VM_MINIMAL_UPTIME)
self._verifyDevices(vm)
@requireKVM
def testComplexVm(self):
customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
'vmName': 'testComplexVm',
'display': _GRAPHICS_FOR_ARCH[platform.machine()],
'devices': [
{'type': 'sound', 'device': 'ac97'},
{'type': 'sound', 'device': 'ich6'},
{'type': 'video', 'device': 'qxl'},
{'type': 'video', 'device': 'qxl'},
{'type': 'graphics', 'device': 'spice'},
{'type': 'controller', 'device': 'virtio-serial'},
{'type': 'controller', 'device': 'usb'},
{'type': 'balloon', 'device': 'memballoon',
'specParams': {'model': 'virtio'}},
                         {'type': 'watchdog', 'device': 'watchdog'},
{'type': 'smartcard', 'device': 'smartcard',
'specParams': {'type': 'spicevmc',
'mode': 'passthrough'}},
{'type': 'console', 'device': 'console'},
{'nicModel': 'virtio', 'device': 'bridge',
'macAddr': '52:54:00:59:F5:3F', 'network': '',
'type': 'interface'},
{'nicModel': 'virtio', 'device': 'bridge',
'macAddr': '52:54:00:59:FF:FF', 'network': '',
'type': 'interface'},
]}
status, msg, caps = self.vdsm.getVdsCapabilities()
self.assertEqual(status, SUCCESS, msg)
if caps['rngSources']:
for _ in range(0, 2):
customization['devices'].append(
{'type': 'rng', 'model': 'virtio', 'device': 'rng',
'specParams': {'source': caps['rngSources'][0]}})
with RunningVm(self.vdsm, customization) as vm:
self._waitForStartup(vm, VM_MINIMAL_UPTIME)
self._verifyDevices(vm)
@requireKVM
def testHeadlessVm(self):
customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
'vmName': 'testHeadlessVm'}
with RunningVm(self.vdsm, customization) as vm:
self._waitForStartup(vm, VM_MINIMAL_UPTIME)
@requireKVM
@permutations([['localfs'], ['iscsi'], ['nfs']])
def testVmWithStorage(self, backendType):
disk = storage.StorageTest()
disk.setUp()
conf = storage.storageLayouts[backendType]
drives = disk.generateDriveConf(conf)
customization = {'vmId': '88888888-eeee-ffff-aaaa-111111111111',
'vmName': 'testVmWithStorage' + backendType,
'drives': drives,
'display': _GRAPHICS_FOR_ARCH[platform.machine()]}
with RollbackContext() as rollback:
disk.createVdsmStorageLayout(conf, 3, rollback)
with RunningVm(self.vdsm, customization) as vm:
self._waitForStartup(vm, VM_MINIMAL_UPTIME)
self._verifyDevices(vm)
@requireKVM
@permutations([['hotplugNic'], ['virtioNic'], ['smartcard'],
['hotplugDisk'], ['virtioRng']])
def testVmWithDevice(self, *devices):
customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
'vmName': 'testVm', 'devices': [],
'display': _GRAPHICS_FOR_ARCH[platform.machine()]}
storageLayout = storage.storageLayouts['localfs']
diskSpecs = storage.StorageTest.generateDriveConf(storageLayout)
pciSpecs = {'bus': '0x00', 'domain': '0x0000',
'function': '0x0', 'type': 'pci'}
ccidSpecs = {'slot': '0', 'controller': '0', 'type': 'ccid'}
pciSlots = [dict({'slot': '0x01'}, **pciSpecs),
dict({'slot': '0x02'}, **pciSpecs),
dict({'slot': '0x03'}, **pciSpecs)]
deviceDef = {'virtioNic': {'nicModel': 'virtio',
'macAddr': '52:54:00:59:F5:3F',
'network': '', 'address': pciSlots[2],
'device': 'bridge', 'type': 'interface',
'linkActive': True,
'filter': 'no-mac-spoofing'},
'hotplugNic': {'vmId': customization['vmId'],
'nic': {'nicModel': 'virtio',
'macAddr': '52:54:00:59:F5:2F',
'network': '',
'address': pciSlots[1],
'device': 'bridge',
'type': 'interface',
'linkActive': True,
'filter': 'no-mac-spoofing'}},
'smartcard': {'type': 'smartcard', 'device': 'smartcard',
'address': ccidSpecs,
'alias': 'smartcard', 'specParams':
{'type': 'spicevmc',
'mode': 'passthrough'}},
'hotplugDisk': {'vmId': customization['vmId'],
'drive': diskSpecs}}
if 'virtioRng' in devices:
status, msg, caps = self.vdsm.getVdsCapabilities()
self.assertEqual(status, SUCCESS, msg)
if not caps['rngSources']:
raise SkipTest('No suitable rng source on host found')
            # we can safely pick any device, as long as it exists
deviceDef['virtioRng'] = {'type': 'rng', 'model': 'virtio',
'specParams': {'bytes': '1234',
'period': '20000',
'source':
caps['rngSources'][0]}}
for device in devices:
if 'hotplug' not in device:
customization['devices'].append(deviceDef[device])
with RunningVm(self.vdsm, customization) as vm:
self._waitForStartup(vm, VM_MINIMAL_UPTIME)
self._verifyDevices(vm)
if 'hotplugNic' in devices:
self.retryAssert(partial(self.vdsm.hotplugNic,
deviceDef['hotplugNic']), timeout=10)
self.retryAssert(partial(self.vdsm.hotunplugNic,
deviceDef['hotplugNic']), timeout=10)
if 'hotplugDisk' in devices:
self.retryAssert(partial(self.vdsm.hotplugDisk,
deviceDef['hotplugDisk']), timeout=10)
self.retryAssert(partial(self.vdsm.hotunplugDisk,
deviceDef['hotplugDisk']), timeout=10)
@permutations([['self'], ['specParams'], ['vmPayload']])
def testVmWithCdrom(self, pathLocation):
customization = {'vmId': '77777777-ffff-3333-bbbb-222222222222',
'devices': [],
'vmName':
'testVmWithCdrom_%s' % pathLocation,
'display': _GRAPHICS_FOR_ARCH[platform.machine()]}
# echo -n testPayload | md5sum
# d37e46c24c78b1aed33496107afdb44b
vmPayloadName = ('/var/run/vdsm/payload/%s.'
'd37e46c24c78b1aed33496107afdb44b'
'.img' % customization['vmId'])
cdrom = {'index': '2', 'iface': 'ide', 'specParams':
{}, 'readonly': 'true', 'path':
'', 'device': 'cdrom', 'shared':
'false', 'type': 'disk'}
with temporaryPath(0o666) as path:
cdromPaths = {'self': {'path': path, 'specParams':
{'path': '/dev/null'}},
'specParams': {'path': '', 'specParams':
{'path': path}},
'vmPayload': {'path': '', 'specParams':
{'path': '',
'vmPayload': {'volId': 'testConfig',
'file': {'testPayload':
''}}}}}
cdrom.update(cdromPaths[pathLocation])
customization['devices'].append(cdrom)
with RunningVm(self.vdsm, customization) as vm:
self._waitForStartup(vm, 10)
self._verifyDevices(vm)
status, msg, stats = self.vdsm.getVmList(vm)
self.assertEqual(status, SUCCESS, msg)
for device in stats['devices']:
if device['device'] == 'cdrom':
if 'vmPayload' in cdrom['specParams']:
cdrom['path'] = vmPayloadName
self.assertEqual(device['path'], cdrom['path'])
self.assertEqual(device['specParams']['path'],
cdrom['specParams']['path'])
@permutations([['vnc'], ['qxl']])
def testVmDefinitionLegacyGraphics(self, displayType):
customization = {'vmId': '77777777-ffff-3333-cccc-222222222222',
'vmName': 'testLegacyGraphicsVm',
'display': displayType}
with RunningVm(self.vdsm, customization) as vm:
self._waitForStartup(vm, VM_MINIMAL_UPTIME)
self._verifyDevices(vm)
@permutations([['vnc'], ['spice']])
def testVmDefinitionGraphics(self, displayType):
devices = [{'type': 'graphics', 'device': displayType}]
customization = {'vmId': '77777777-ffff-3333-cccc-222222222222',
'vmName': 'testGraphicsDeviceVm',
'devices': devices,
'display': 'qxlnc'}
with RunningVm(self.vdsm, customization) as vm:
self._waitForStartup(vm, VM_MINIMAL_UPTIME)
self._verifyDevices(vm)
status, msg, stats = self.vdsm.getVmStats(vm)
self.assertEqual(status, SUCCESS, msg)
self.assertEqual(stats['displayInfo'][0]['type'],
displayType)
self.assertEqual(stats['displayType'],
'qxl' if displayType == 'spice' else 'vnc')
@permutations([['vnc', 'spice'], ['spice', 'vnc']])
def testVmDefinitionMultipleGraphics(self, primary, secondary):
devices = [{'type': 'graphics', 'device': primary},
{'type': 'graphics', 'device': secondary}]
customization = {'vmId': '77777777-ffff-3333-cccc-222222222222',
'vmName': 'testMultipleGraphicsDeviceVm',
'devices': devices,
'display': 'qxlnc'}
with RunningVm(self.vdsm, customization) as vm:
self._waitForStartup(vm, VM_MINIMAL_UPTIME)
self._verifyDevices(vm)
status, msg, stats = self.vdsm.getVmStats(vm)
self.assertEqual(status, SUCCESS, msg)
for dispInfo, dispType in zip(stats['displayInfo'],
(primary, secondary)):
self.assertEqual(dispInfo['type'], dispType)
self.assertEqual(stats['displayType'],
'qxl' if primary == 'spice' else 'vnc')
def testVmWithSla(self):
customization = {'vmId': '99999999-aaaa-ffff-bbbb-111111111111',
'vmName': 'testVmWithSla',
'display': _GRAPHICS_FOR_ARCH[platform.machine()]}
with RunningVm(self.vdsm, customization) as vm:
self._waitForStartup(vm, VM_MINIMAL_UPTIME)
self._verifyDevices(vm)
status, msg, stats = self.vdsm.getVmStats(vm)
self.assertEqual(status, SUCCESS, msg)
self.vdsm.updateVmPolicy(customization['vmId'],
'50')
self.assertEqual(status, SUCCESS, msg)
| gpl-2.0 | -3,675,394,720,704,090,000 | 40.663934 | 79 | 0.521837 | false |
tchx84/social-sugar | src/jarabe/controlpanel/toolbar.py | 2 | 5122 | # Copyright (C) 2007, 2008 One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import Gtk
import gettext
from gi.repository import GObject
_ = lambda msg: gettext.dgettext('sugar', msg)
from sugar3.graphics.icon import Icon
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics import iconentry
from sugar3.graphics import style
class MainToolbar(Gtk.Toolbar):
""" Main toolbar of the control panel
"""
__gtype_name__ = 'MainToolbar'
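    # Custom signals: 'stop-clicked' fires when the Done button is pressed
    # and 'search-changed' carries the current query text on every edit.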
__gsignals__ = {
'stop-clicked': (GObject.SignalFlags.RUN_FIRST,
None,
([])),
'search-changed': (GObject.SignalFlags.RUN_FIRST,
None,
([str])),
}
def __init__(self):
Gtk.Toolbar.__init__(self)
self._add_separator()
tool_item = Gtk.ToolItem()
self.insert(tool_item, -1)
tool_item.show()
self._search_entry = iconentry.IconEntry()
self._search_entry.set_icon_from_name(iconentry.ICON_ENTRY_PRIMARY,
'entry-search')
self._search_entry.add_clear_button()
self._search_entry.set_width_chars(25)
text = _('Search in %s') % _('Settings')
self._search_entry.set_placeholder_text(text)
self._search_entry.connect('changed', self.__search_entry_changed_cb)
tool_item.add(self._search_entry)
self._search_entry.show()
self._add_separator(True)
self.stop = ToolButton(icon_name='dialog-cancel')
self.stop.set_tooltip(_('Done'))
self.stop.connect('clicked', self.__stop_clicked_cb)
self.stop.show()
self.insert(self.stop, -1)
self.stop.show()
def get_entry(self):
return self._search_entry
def _add_separator(self, expand=False):
separator = Gtk.SeparatorToolItem()
separator.props.draw = False
if expand:
separator.set_expand(True)
else:
separator.set_size_request(style.DEFAULT_SPACING, -1)
self.insert(separator, -1)
separator.show()
def __search_entry_changed_cb(self, search_entry):
self.emit('search-changed', search_entry.props.text)
def __stop_clicked_cb(self, button):
self.emit('stop-clicked')
class SectionToolbar(Gtk.Toolbar):
""" Toolbar of the sections of the control panel
"""
__gtype_name__ = 'SectionToolbar'
__gsignals__ = {
'cancel-clicked': (GObject.SignalFlags.RUN_FIRST,
None,
([])),
'accept-clicked': (GObject.SignalFlags.RUN_FIRST,
None,
([])),
}
def __init__(self):
Gtk.Toolbar.__init__(self)
self._add_separator()
self._icon = Icon()
self._add_widget(self._icon)
self._add_separator()
self._title = Gtk.Label()
self._add_widget(self._title)
self._add_separator(True)
self.cancel_button = ToolButton('dialog-cancel')
self.cancel_button.set_tooltip(_('Cancel'))
self.cancel_button.connect('clicked', self.__cancel_button_clicked_cb)
self.insert(self.cancel_button, -1)
self.cancel_button.show()
self.accept_button = ToolButton('dialog-ok')
self.accept_button.set_tooltip(_('Ok'))
self.accept_button.connect('clicked', self.__accept_button_clicked_cb)
self.insert(self.accept_button, -1)
self.accept_button.show()
def get_icon(self):
return self._icon
def get_title(self):
return self._title
def _add_separator(self, expand=False):
separator = Gtk.SeparatorToolItem()
separator.props.draw = False
if expand:
separator.set_expand(True)
else:
separator.set_size_request(style.DEFAULT_SPACING, -1)
self.insert(separator, -1)
separator.show()
def _add_widget(self, widget, expand=False):
tool_item = Gtk.ToolItem()
tool_item.set_expand(expand)
tool_item.add(widget)
widget.show()
self.insert(tool_item, -1)
tool_item.show()
def __cancel_button_clicked_cb(self, widget, data=None):
self.emit('cancel-clicked')
def __accept_button_clicked_cb(self, widget, data=None):
self.emit('accept-clicked')
| gpl-2.0 | 2,124,579,432,949,650,700 | 30.617284 | 78 | 0.602109 | false |
technologiescollege/s2a_fr | s2a/Python/Lib/lib2to3/tests/test_fixers.py | 8 | 120124 | """ Test suite for the fixer modules """
# Python imports
import os
import unittest
from itertools import chain
from operator import itemgetter
# Local imports
from lib2to3 import pygram, pytree, refactor, fixer_util
from lib2to3.tests import support
class FixerTestCase(support.TestCase):
# Other test cases can subclass this class and replace "fixer_pkg" with
# their own.
def setUp(self, fix_list=None, fixer_pkg="lib2to3", options=None):
if fix_list is None:
fix_list = [self.fixer]
self.refactor = support.get_refactorer(fixer_pkg, fix_list, options)
self.fixer_log = []
self.filename = u"<string>"
for fixer in chain(self.refactor.pre_order,
self.refactor.post_order):
fixer.log = self.fixer_log
def _check(self, before, after):
before = support.reformat(before)
after = support.reformat(after)
tree = self.refactor.refactor_string(before, self.filename)
self.assertEqual(after, unicode(tree))
return tree
def check(self, before, after, ignore_warnings=False):
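        # Refactor `before`, require the output to equal `after`, and
        # require that the tree was actually changed.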
tree = self._check(before, after)
self.assertTrue(tree.was_changed)
if not ignore_warnings:
self.assertEqual(self.fixer_log, [])
def warns(self, before, after, message, unchanged=False):
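        # Like check(), but additionally require `message` in the fixer log;
        # with unchanged=True the source must come through untouched.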
tree = self._check(before, after)
self.assertTrue(message in "".join(self.fixer_log))
if not unchanged:
self.assertTrue(tree.was_changed)
def warns_unchanged(self, before, message):
self.warns(before, before, message, unchanged=True)
def unchanged(self, before, ignore_warnings=False):
self._check(before, before)
if not ignore_warnings:
self.assertEqual(self.fixer_log, [])
def assert_runs_after(self, *names):
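        # Build a refactorer from this fixer plus `names` and assert that
        # this fixer is scheduled last, i.e. it runs after all the others.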
fixes = [self.fixer]
fixes.extend(names)
r = support.get_refactorer("lib2to3", fixes)
(pre, post) = r.get_fixers()
n = "fix_" + self.fixer
if post and post[-1].__class__.__module__.endswith(n):
# We're the last fixer to run
return
if pre and pre[-1].__class__.__module__.endswith(n) and not post:
# We're the last in pre and post is empty
return
self.fail("Fixer run order (%s) is incorrect; %s should be last."\
%(", ".join([x.__class__.__module__ for x in (pre+post)]), n))
class Test_ne(FixerTestCase):
fixer = "ne"
def test_basic(self):
b = """if x <> y:
pass"""
a = """if x != y:
pass"""
self.check(b, a)
def test_no_spaces(self):
b = """if x<>y:
pass"""
a = """if x!=y:
pass"""
self.check(b, a)
def test_chained(self):
b = """if x<>y<>z:
pass"""
a = """if x!=y!=z:
pass"""
self.check(b, a)
class Test_has_key(FixerTestCase):
fixer = "has_key"
def test_1(self):
b = """x = d.has_key("x") or d.has_key("y")"""
a = """x = "x" in d or "y" in d"""
self.check(b, a)
def test_2(self):
b = """x = a.b.c.d.has_key("x") ** 3"""
a = """x = ("x" in a.b.c.d) ** 3"""
self.check(b, a)
def test_3(self):
b = """x = a.b.has_key(1 + 2).__repr__()"""
a = """x = (1 + 2 in a.b).__repr__()"""
self.check(b, a)
def test_4(self):
b = """x = a.b.has_key(1 + 2).__repr__() ** -3 ** 4"""
a = """x = (1 + 2 in a.b).__repr__() ** -3 ** 4"""
self.check(b, a)
def test_5(self):
b = """x = a.has_key(f or g)"""
a = """x = (f or g) in a"""
self.check(b, a)
def test_6(self):
b = """x = a + b.has_key(c)"""
a = """x = a + (c in b)"""
self.check(b, a)
def test_7(self):
b = """x = a.has_key(lambda: 12)"""
a = """x = (lambda: 12) in a"""
self.check(b, a)
def test_8(self):
b = """x = a.has_key(a for a in b)"""
a = """x = (a for a in b) in a"""
self.check(b, a)
def test_9(self):
b = """if not a.has_key(b): pass"""
a = """if b not in a: pass"""
self.check(b, a)
def test_10(self):
b = """if not a.has_key(b).__repr__(): pass"""
a = """if not (b in a).__repr__(): pass"""
self.check(b, a)
def test_11(self):
b = """if not a.has_key(b) ** 2: pass"""
a = """if not (b in a) ** 2: pass"""
self.check(b, a)
class Test_apply(FixerTestCase):
fixer = "apply"
def test_1(self):
b = """x = apply(f, g + h)"""
a = """x = f(*g + h)"""
self.check(b, a)
def test_2(self):
b = """y = apply(f, g, h)"""
a = """y = f(*g, **h)"""
self.check(b, a)
def test_3(self):
b = """z = apply(fs[0], g or h, h or g)"""
a = """z = fs[0](*g or h, **h or g)"""
self.check(b, a)
def test_4(self):
b = """apply(f, (x, y) + t)"""
a = """f(*(x, y) + t)"""
self.check(b, a)
def test_5(self):
b = """apply(f, args,)"""
a = """f(*args)"""
self.check(b, a)
def test_6(self):
b = """apply(f, args, kwds,)"""
a = """f(*args, **kwds)"""
self.check(b, a)
# Test that complex functions are parenthesized
def test_complex_1(self):
b = """x = apply(f+g, args)"""
a = """x = (f+g)(*args)"""
self.check(b, a)
def test_complex_2(self):
b = """x = apply(f*g, args)"""
a = """x = (f*g)(*args)"""
self.check(b, a)
def test_complex_3(self):
b = """x = apply(f**g, args)"""
a = """x = (f**g)(*args)"""
self.check(b, a)
# But dotted names etc. not
def test_dotted_name(self):
b = """x = apply(f.g, args)"""
a = """x = f.g(*args)"""
self.check(b, a)
def test_subscript(self):
b = """x = apply(f[x], args)"""
a = """x = f[x](*args)"""
self.check(b, a)
def test_call(self):
b = """x = apply(f(), args)"""
a = """x = f()(*args)"""
self.check(b, a)
# Extreme case
def test_extreme(self):
b = """x = apply(a.b.c.d.e.f, args, kwds)"""
a = """x = a.b.c.d.e.f(*args, **kwds)"""
self.check(b, a)
# XXX Comments in weird places still get lost
def test_weird_comments(self):
b = """apply( # foo
f, # bar
args)"""
a = """f(*args)"""
self.check(b, a)
# These should *not* be touched
def test_unchanged_1(self):
s = """apply()"""
self.unchanged(s)
def test_unchanged_2(self):
s = """apply(f)"""
self.unchanged(s)
def test_unchanged_3(self):
s = """apply(f,)"""
self.unchanged(s)
def test_unchanged_4(self):
s = """apply(f, args, kwds, extras)"""
self.unchanged(s)
def test_unchanged_5(self):
s = """apply(f, *args, **kwds)"""
self.unchanged(s)
def test_unchanged_6(self):
s = """apply(f, *args)"""
self.unchanged(s)
def test_unchanged_7(self):
s = """apply(func=f, args=args, kwds=kwds)"""
self.unchanged(s)
def test_unchanged_8(self):
s = """apply(f, args=args, kwds=kwds)"""
self.unchanged(s)
def test_unchanged_9(self):
s = """apply(f, args, kwds=kwds)"""
self.unchanged(s)
def test_space_1(self):
a = """apply( f, args, kwds)"""
b = """f(*args, **kwds)"""
self.check(a, b)
def test_space_2(self):
a = """apply( f ,args,kwds )"""
b = """f(*args, **kwds)"""
self.check(a, b)
class Test_intern(FixerTestCase):
fixer = "intern"
def test_prefix_preservation(self):
b = """x = intern( a )"""
a = """import sys\nx = sys.intern( a )"""
self.check(b, a)
b = """y = intern("b" # test
)"""
a = """import sys\ny = sys.intern("b" # test
)"""
self.check(b, a)
b = """z = intern(a+b+c.d, )"""
a = """import sys\nz = sys.intern(a+b+c.d, )"""
self.check(b, a)
def test(self):
b = """x = intern(a)"""
a = """import sys\nx = sys.intern(a)"""
self.check(b, a)
b = """z = intern(a+b+c.d,)"""
a = """import sys\nz = sys.intern(a+b+c.d,)"""
self.check(b, a)
b = """intern("y%s" % 5).replace("y", "")"""
a = """import sys\nsys.intern("y%s" % 5).replace("y", "")"""
self.check(b, a)
# These should not be refactored
def test_unchanged(self):
s = """intern(a=1)"""
self.unchanged(s)
s = """intern(f, g)"""
self.unchanged(s)
s = """intern(*h)"""
self.unchanged(s)
s = """intern(**i)"""
self.unchanged(s)
s = """intern()"""
self.unchanged(s)
class Test_reduce(FixerTestCase):
fixer = "reduce"
def test_simple_call(self):
b = "reduce(a, b, c)"
a = "from functools import reduce\nreduce(a, b, c)"
self.check(b, a)
def test_bug_7253(self):
# fix_tuple_params was being bad and orphaning nodes in the tree.
b = "def x(arg): reduce(sum, [])"
a = "from functools import reduce\ndef x(arg): reduce(sum, [])"
self.check(b, a)
def test_call_with_lambda(self):
b = "reduce(lambda x, y: x + y, seq)"
a = "from functools import reduce\nreduce(lambda x, y: x + y, seq)"
self.check(b, a)
def test_unchanged(self):
s = "reduce(a)"
self.unchanged(s)
s = "reduce(a, b=42)"
self.unchanged(s)
s = "reduce(a, b, c, d)"
self.unchanged(s)
s = "reduce(**c)"
self.unchanged(s)
s = "reduce()"
self.unchanged(s)
class Test_print(FixerTestCase):
fixer = "print"
def test_prefix_preservation(self):
b = """print 1, 1+1, 1+1+1"""
a = """print(1, 1+1, 1+1+1)"""
self.check(b, a)
def test_idempotency(self):
s = """print()"""
self.unchanged(s)
s = """print('')"""
self.unchanged(s)
def test_idempotency_print_as_function(self):
self.refactor.driver.grammar = pygram.python_grammar_no_print_statement
s = """print(1, 1+1, 1+1+1)"""
self.unchanged(s)
s = """print()"""
self.unchanged(s)
s = """print('')"""
self.unchanged(s)
def test_1(self):
b = """print 1, 1+1, 1+1+1"""
a = """print(1, 1+1, 1+1+1)"""
self.check(b, a)
def test_2(self):
b = """print 1, 2"""
a = """print(1, 2)"""
self.check(b, a)
def test_3(self):
b = """print"""
a = """print()"""
self.check(b, a)
def test_4(self):
# from bug 3000
b = """print whatever; print"""
a = """print(whatever); print()"""
self.check(b, a)
def test_5(self):
b = """print; print whatever;"""
a = """print(); print(whatever);"""
self.check(b, a)
def test_tuple(self):
b = """print (a, b, c)"""
a = """print((a, b, c))"""
self.check(b, a)
# trailing commas
def test_trailing_comma_1(self):
b = """print 1, 2, 3,"""
a = """print(1, 2, 3, end=' ')"""
self.check(b, a)
def test_trailing_comma_2(self):
b = """print 1, 2,"""
a = """print(1, 2, end=' ')"""
self.check(b, a)
def test_trailing_comma_3(self):
b = """print 1,"""
a = """print(1, end=' ')"""
self.check(b, a)
# >> stuff
def test_vargs_without_trailing_comma(self):
b = """print >>sys.stderr, 1, 2, 3"""
a = """print(1, 2, 3, file=sys.stderr)"""
self.check(b, a)
def test_with_trailing_comma(self):
b = """print >>sys.stderr, 1, 2,"""
a = """print(1, 2, end=' ', file=sys.stderr)"""
self.check(b, a)
def test_no_trailing_comma(self):
b = """print >>sys.stderr, 1+1"""
a = """print(1+1, file=sys.stderr)"""
self.check(b, a)
def test_spaces_before_file(self):
b = """print >> sys.stderr"""
a = """print(file=sys.stderr)"""
self.check(b, a)
def test_with_future_print_function(self):
s = "from __future__ import print_function\n" \
"print('Hai!', end=' ')"
self.unchanged(s)
b = "print 'Hello, world!'"
a = "print('Hello, world!')"
self.check(b, a)
class Test_exec(FixerTestCase):
fixer = "exec"
def test_prefix_preservation(self):
b = """ exec code in ns1, ns2"""
a = """ exec(code, ns1, ns2)"""
self.check(b, a)
def test_basic(self):
b = """exec code"""
a = """exec(code)"""
self.check(b, a)
def test_with_globals(self):
b = """exec code in ns"""
a = """exec(code, ns)"""
self.check(b, a)
def test_with_globals_locals(self):
b = """exec code in ns1, ns2"""
a = """exec(code, ns1, ns2)"""
self.check(b, a)
def test_complex_1(self):
b = """exec (a.b()) in ns"""
a = """exec((a.b()), ns)"""
self.check(b, a)
def test_complex_2(self):
b = """exec a.b() + c in ns"""
a = """exec(a.b() + c, ns)"""
self.check(b, a)
# These should not be touched
def test_unchanged_1(self):
s = """exec(code)"""
self.unchanged(s)
def test_unchanged_2(self):
s = """exec (code)"""
self.unchanged(s)
def test_unchanged_3(self):
s = """exec(code, ns)"""
self.unchanged(s)
def test_unchanged_4(self):
s = """exec(code, ns1, ns2)"""
self.unchanged(s)
class Test_repr(FixerTestCase):
fixer = "repr"
def test_prefix_preservation(self):
b = """x = `1 + 2`"""
a = """x = repr(1 + 2)"""
self.check(b, a)
def test_simple_1(self):
b = """x = `1 + 2`"""
a = """x = repr(1 + 2)"""
self.check(b, a)
def test_simple_2(self):
b = """y = `x`"""
a = """y = repr(x)"""
self.check(b, a)
def test_complex(self):
b = """z = `y`.__repr__()"""
a = """z = repr(y).__repr__()"""
self.check(b, a)
def test_tuple(self):
b = """x = `1, 2, 3`"""
a = """x = repr((1, 2, 3))"""
self.check(b, a)
def test_nested(self):
b = """x = `1 + `2``"""
a = """x = repr(1 + repr(2))"""
self.check(b, a)
def test_nested_tuples(self):
b = """x = `1, 2 + `3, 4``"""
a = """x = repr((1, 2 + repr((3, 4))))"""
self.check(b, a)
class Test_except(FixerTestCase):
fixer = "except"
def test_prefix_preservation(self):
b = """
try:
pass
except (RuntimeError, ImportError), e:
pass"""
a = """
try:
pass
except (RuntimeError, ImportError) as e:
pass"""
self.check(b, a)
def test_simple(self):
b = """
try:
pass
except Foo, e:
pass"""
a = """
try:
pass
except Foo as e:
pass"""
self.check(b, a)
def test_simple_no_space_before_target(self):
b = """
try:
pass
except Foo,e:
pass"""
a = """
try:
pass
except Foo as e:
pass"""
self.check(b, a)
def test_tuple_unpack(self):
b = """
def foo():
try:
pass
except Exception, (f, e):
pass
except ImportError, e:
pass"""
a = """
def foo():
try:
pass
except Exception as xxx_todo_changeme:
(f, e) = xxx_todo_changeme.args
pass
except ImportError as e:
pass"""
self.check(b, a)
def test_multi_class(self):
b = """
try:
pass
except (RuntimeError, ImportError), e:
pass"""
a = """
try:
pass
except (RuntimeError, ImportError) as e:
pass"""
self.check(b, a)
def test_list_unpack(self):
b = """
try:
pass
except Exception, [a, b]:
pass"""
a = """
try:
pass
except Exception as xxx_todo_changeme:
[a, b] = xxx_todo_changeme.args
pass"""
self.check(b, a)
def test_weird_target_1(self):
b = """
try:
pass
except Exception, d[5]:
pass"""
a = """
try:
pass
except Exception as xxx_todo_changeme:
d[5] = xxx_todo_changeme
pass"""
self.check(b, a)
def test_weird_target_2(self):
b = """
try:
pass
except Exception, a.foo:
pass"""
a = """
try:
pass
except Exception as xxx_todo_changeme:
a.foo = xxx_todo_changeme
pass"""
self.check(b, a)
def test_weird_target_3(self):
b = """
try:
pass
except Exception, a().foo:
pass"""
a = """
try:
pass
except Exception as xxx_todo_changeme:
a().foo = xxx_todo_changeme
pass"""
self.check(b, a)
def test_bare_except(self):
b = """
try:
pass
except Exception, a:
pass
except:
pass"""
a = """
try:
pass
except Exception as a:
pass
except:
pass"""
self.check(b, a)
def test_bare_except_and_else_finally(self):
b = """
try:
pass
except Exception, a:
pass
except:
pass
else:
pass
finally:
pass"""
a = """
try:
pass
except Exception as a:
pass
except:
pass
else:
pass
finally:
pass"""
self.check(b, a)
def test_multi_fixed_excepts_before_bare_except(self):
b = """
try:
pass
except TypeError, b:
pass
except Exception, a:
pass
except:
pass"""
a = """
try:
pass
except TypeError as b:
pass
except Exception as a:
pass
except:
pass"""
self.check(b, a)
def test_one_line_suites(self):
b = """
try: raise TypeError
except TypeError, e:
pass
"""
a = """
try: raise TypeError
except TypeError as e:
pass
"""
self.check(b, a)
b = """
try:
raise TypeError
except TypeError, e: pass
"""
a = """
try:
raise TypeError
except TypeError as e: pass
"""
self.check(b, a)
b = """
try: raise TypeError
except TypeError, e: pass
"""
a = """
try: raise TypeError
except TypeError as e: pass
"""
self.check(b, a)
b = """
try: raise TypeError
except TypeError, e: pass
else: function()
finally: done()
"""
a = """
try: raise TypeError
except TypeError as e: pass
else: function()
finally: done()
"""
self.check(b, a)
# These should not be touched:
def test_unchanged_1(self):
s = """
try:
pass
except:
pass"""
self.unchanged(s)
def test_unchanged_2(self):
s = """
try:
pass
except Exception:
pass"""
self.unchanged(s)
def test_unchanged_3(self):
s = """
try:
pass
except (Exception, SystemExit):
pass"""
self.unchanged(s)
class Test_raise(FixerTestCase):
fixer = "raise"
def test_basic(self):
b = """raise Exception, 5"""
a = """raise Exception(5)"""
self.check(b, a)
def test_prefix_preservation(self):
b = """raise Exception,5"""
a = """raise Exception(5)"""
self.check(b, a)
b = """raise Exception, 5"""
a = """raise Exception(5)"""
self.check(b, a)
def test_with_comments(self):
b = """raise Exception, 5 # foo"""
a = """raise Exception(5) # foo"""
self.check(b, a)
b = """raise E, (5, 6) % (a, b) # foo"""
a = """raise E((5, 6) % (a, b)) # foo"""
self.check(b, a)
b = """def foo():
raise Exception, 5, 6 # foo"""
a = """def foo():
raise Exception(5).with_traceback(6) # foo"""
self.check(b, a)
def test_None_value(self):
b = """raise Exception(5), None, tb"""
a = """raise Exception(5).with_traceback(tb)"""
self.check(b, a)
def test_tuple_value(self):
b = """raise Exception, (5, 6, 7)"""
a = """raise Exception(5, 6, 7)"""
self.check(b, a)
def test_tuple_detection(self):
b = """raise E, (5, 6) % (a, b)"""
a = """raise E((5, 6) % (a, b))"""
self.check(b, a)
def test_tuple_exc_1(self):
b = """raise (((E1, E2), E3), E4), V"""
a = """raise E1(V)"""
self.check(b, a)
def test_tuple_exc_2(self):
b = """raise (E1, (E2, E3), E4), V"""
a = """raise E1(V)"""
self.check(b, a)
# These should produce a warning
def test_string_exc(self):
s = """raise 'foo'"""
self.warns_unchanged(s, "Python 3 does not support string exceptions")
def test_string_exc_val(self):
s = """raise "foo", 5"""
self.warns_unchanged(s, "Python 3 does not support string exceptions")
def test_string_exc_val_tb(self):
s = """raise "foo", 5, 6"""
self.warns_unchanged(s, "Python 3 does not support string exceptions")
# These should result in traceback-assignment
def test_tb_1(self):
b = """def foo():
raise Exception, 5, 6"""
a = """def foo():
raise Exception(5).with_traceback(6)"""
self.check(b, a)
def test_tb_2(self):
b = """def foo():
a = 5
raise Exception, 5, 6
b = 6"""
a = """def foo():
a = 5
raise Exception(5).with_traceback(6)
b = 6"""
self.check(b, a)
def test_tb_3(self):
b = """def foo():
raise Exception,5,6"""
a = """def foo():
raise Exception(5).with_traceback(6)"""
self.check(b, a)
def test_tb_4(self):
b = """def foo():
a = 5
raise Exception,5,6
b = 6"""
a = """def foo():
a = 5
raise Exception(5).with_traceback(6)
b = 6"""
self.check(b, a)
def test_tb_5(self):
b = """def foo():
raise Exception, (5, 6, 7), 6"""
a = """def foo():
raise Exception(5, 6, 7).with_traceback(6)"""
self.check(b, a)
def test_tb_6(self):
b = """def foo():
a = 5
raise Exception, (5, 6, 7), 6
b = 6"""
a = """def foo():
a = 5
raise Exception(5, 6, 7).with_traceback(6)
b = 6"""
self.check(b, a)
class Test_throw(FixerTestCase):
fixer = "throw"
def test_1(self):
b = """g.throw(Exception, 5)"""
a = """g.throw(Exception(5))"""
self.check(b, a)
def test_2(self):
b = """g.throw(Exception,5)"""
a = """g.throw(Exception(5))"""
self.check(b, a)
def test_3(self):
b = """g.throw(Exception, (5, 6, 7))"""
a = """g.throw(Exception(5, 6, 7))"""
self.check(b, a)
def test_4(self):
b = """5 + g.throw(Exception, 5)"""
a = """5 + g.throw(Exception(5))"""
self.check(b, a)
# These should produce warnings
def test_warn_1(self):
s = """g.throw("foo")"""
self.warns_unchanged(s, "Python 3 does not support string exceptions")
def test_warn_2(self):
s = """g.throw("foo", 5)"""
self.warns_unchanged(s, "Python 3 does not support string exceptions")
def test_warn_3(self):
s = """g.throw("foo", 5, 6)"""
self.warns_unchanged(s, "Python 3 does not support string exceptions")
# These should not be touched
def test_untouched_1(self):
s = """g.throw(Exception)"""
self.unchanged(s)
def test_untouched_2(self):
s = """g.throw(Exception(5, 6))"""
self.unchanged(s)
def test_untouched_3(self):
s = """5 + g.throw(Exception(5, 6))"""
self.unchanged(s)
# These should result in traceback-assignment
def test_tb_1(self):
b = """def foo():
g.throw(Exception, 5, 6)"""
a = """def foo():
g.throw(Exception(5).with_traceback(6))"""
self.check(b, a)
def test_tb_2(self):
b = """def foo():
a = 5
g.throw(Exception, 5, 6)
b = 6"""
a = """def foo():
a = 5
g.throw(Exception(5).with_traceback(6))
b = 6"""
self.check(b, a)
def test_tb_3(self):
b = """def foo():
g.throw(Exception,5,6)"""
a = """def foo():
g.throw(Exception(5).with_traceback(6))"""
self.check(b, a)
def test_tb_4(self):
b = """def foo():
a = 5
g.throw(Exception,5,6)
b = 6"""
a = """def foo():
a = 5
g.throw(Exception(5).with_traceback(6))
b = 6"""
self.check(b, a)
def test_tb_5(self):
b = """def foo():
g.throw(Exception, (5, 6, 7), 6)"""
a = """def foo():
g.throw(Exception(5, 6, 7).with_traceback(6))"""
self.check(b, a)
def test_tb_6(self):
b = """def foo():
a = 5
g.throw(Exception, (5, 6, 7), 6)
b = 6"""
a = """def foo():
a = 5
g.throw(Exception(5, 6, 7).with_traceback(6))
b = 6"""
self.check(b, a)
def test_tb_7(self):
b = """def foo():
a + g.throw(Exception, 5, 6)"""
a = """def foo():
a + g.throw(Exception(5).with_traceback(6))"""
self.check(b, a)
def test_tb_8(self):
b = """def foo():
a = 5
a + g.throw(Exception, 5, 6)
b = 6"""
a = """def foo():
a = 5
a + g.throw(Exception(5).with_traceback(6))
b = 6"""
self.check(b, a)
class Test_long(FixerTestCase):
fixer = "long"
def test_1(self):
b = """x = long(x)"""
a = """x = int(x)"""
self.check(b, a)
def test_2(self):
b = """y = isinstance(x, long)"""
a = """y = isinstance(x, int)"""
self.check(b, a)
def test_3(self):
b = """z = type(x) in (int, long)"""
a = """z = type(x) in (int, int)"""
self.check(b, a)
def test_unchanged(self):
s = """long = True"""
self.unchanged(s)
s = """s.long = True"""
self.unchanged(s)
s = """def long(): pass"""
self.unchanged(s)
s = """class long(): pass"""
self.unchanged(s)
s = """def f(long): pass"""
self.unchanged(s)
s = """def f(g, long): pass"""
self.unchanged(s)
s = """def f(x, long=True): pass"""
self.unchanged(s)
def test_prefix_preservation(self):
b = """x = long( x )"""
a = """x = int( x )"""
self.check(b, a)
class Test_execfile(FixerTestCase):
fixer = "execfile"
def test_conversion(self):
b = """execfile("fn")"""
a = """exec(compile(open("fn").read(), "fn", 'exec'))"""
self.check(b, a)
b = """execfile("fn", glob)"""
a = """exec(compile(open("fn").read(), "fn", 'exec'), glob)"""
self.check(b, a)
b = """execfile("fn", glob, loc)"""
a = """exec(compile(open("fn").read(), "fn", 'exec'), glob, loc)"""
self.check(b, a)
b = """execfile("fn", globals=glob)"""
a = """exec(compile(open("fn").read(), "fn", 'exec'), globals=glob)"""
self.check(b, a)
b = """execfile("fn", locals=loc)"""
a = """exec(compile(open("fn").read(), "fn", 'exec'), locals=loc)"""
self.check(b, a)
b = """execfile("fn", globals=glob, locals=loc)"""
a = """exec(compile(open("fn").read(), "fn", 'exec'), globals=glob, locals=loc)"""
self.check(b, a)
def test_spacing(self):
b = """execfile( "fn" )"""
a = """exec(compile(open( "fn" ).read(), "fn", 'exec'))"""
self.check(b, a)
b = """execfile("fn", globals = glob)"""
a = """exec(compile(open("fn").read(), "fn", 'exec'), globals = glob)"""
self.check(b, a)
class Test_isinstance(FixerTestCase):
fixer = "isinstance"
def test_remove_multiple_items(self):
b = """isinstance(x, (int, int, int))"""
a = """isinstance(x, int)"""
self.check(b, a)
b = """isinstance(x, (int, float, int, int, float))"""
a = """isinstance(x, (int, float))"""
self.check(b, a)
b = """isinstance(x, (int, float, int, int, float, str))"""
a = """isinstance(x, (int, float, str))"""
self.check(b, a)
b = """isinstance(foo() + bar(), (x(), y(), x(), int, int))"""
a = """isinstance(foo() + bar(), (x(), y(), x(), int))"""
self.check(b, a)
def test_prefix_preservation(self):
b = """if isinstance( foo(), ( bar, bar, baz )) : pass"""
a = """if isinstance( foo(), ( bar, baz )) : pass"""
self.check(b, a)
def test_unchanged(self):
self.unchanged("isinstance(x, (str, int))")
class Test_dict(FixerTestCase):
fixer = "dict"
def test_prefix_preservation(self):
b = "if d. keys ( ) : pass"
a = "if list(d. keys ( )) : pass"
self.check(b, a)
b = "if d. items ( ) : pass"
a = "if list(d. items ( )) : pass"
self.check(b, a)
b = "if d. iterkeys ( ) : pass"
a = "if iter(d. keys ( )) : pass"
self.check(b, a)
b = "[i for i in d. iterkeys( ) ]"
a = "[i for i in d. keys( ) ]"
self.check(b, a)
b = "if d. viewkeys ( ) : pass"
a = "if d. keys ( ) : pass"
self.check(b, a)
b = "[i for i in d. viewkeys( ) ]"
a = "[i for i in d. keys( ) ]"
self.check(b, a)
def test_trailing_comment(self):
b = "d.keys() # foo"
a = "list(d.keys()) # foo"
self.check(b, a)
b = "d.items() # foo"
a = "list(d.items()) # foo"
self.check(b, a)
b = "d.iterkeys() # foo"
a = "iter(d.keys()) # foo"
self.check(b, a)
b = """[i for i in d.iterkeys() # foo
]"""
a = """[i for i in d.keys() # foo
]"""
self.check(b, a)
b = """[i for i in d.iterkeys() # foo
]"""
a = """[i for i in d.keys() # foo
]"""
self.check(b, a)
b = "d.viewitems() # foo"
a = "d.items() # foo"
self.check(b, a)
def test_unchanged(self):
for wrapper in fixer_util.consuming_calls:
s = "s = %s(d.keys())" % wrapper
self.unchanged(s)
s = "s = %s(d.values())" % wrapper
self.unchanged(s)
s = "s = %s(d.items())" % wrapper
self.unchanged(s)
def test_01(self):
b = "d.keys()"
a = "list(d.keys())"
self.check(b, a)
b = "a[0].foo().keys()"
a = "list(a[0].foo().keys())"
self.check(b, a)
def test_02(self):
b = "d.items()"
a = "list(d.items())"
self.check(b, a)
def test_03(self):
b = "d.values()"
a = "list(d.values())"
self.check(b, a)
def test_04(self):
b = "d.iterkeys()"
a = "iter(d.keys())"
self.check(b, a)
def test_05(self):
b = "d.iteritems()"
a = "iter(d.items())"
self.check(b, a)
def test_06(self):
b = "d.itervalues()"
a = "iter(d.values())"
self.check(b, a)
def test_07(self):
s = "list(d.keys())"
self.unchanged(s)
def test_08(self):
s = "sorted(d.keys())"
self.unchanged(s)
def test_09(self):
b = "iter(d.keys())"
a = "iter(list(d.keys()))"
self.check(b, a)
def test_10(self):
b = "foo(d.keys())"
a = "foo(list(d.keys()))"
self.check(b, a)
def test_11(self):
b = "for i in d.keys(): print i"
a = "for i in list(d.keys()): print i"
self.check(b, a)
def test_12(self):
b = "for i in d.iterkeys(): print i"
a = "for i in d.keys(): print i"
self.check(b, a)
def test_13(self):
b = "[i for i in d.keys()]"
a = "[i for i in list(d.keys())]"
self.check(b, a)
def test_14(self):
b = "[i for i in d.iterkeys()]"
a = "[i for i in d.keys()]"
self.check(b, a)
def test_15(self):
b = "(i for i in d.keys())"
a = "(i for i in list(d.keys()))"
self.check(b, a)
def test_16(self):
b = "(i for i in d.iterkeys())"
a = "(i for i in d.keys())"
self.check(b, a)
def test_17(self):
b = "iter(d.iterkeys())"
a = "iter(d.keys())"
self.check(b, a)
def test_18(self):
b = "list(d.iterkeys())"
a = "list(d.keys())"
self.check(b, a)
def test_19(self):
b = "sorted(d.iterkeys())"
a = "sorted(d.keys())"
self.check(b, a)
def test_20(self):
b = "foo(d.iterkeys())"
a = "foo(iter(d.keys()))"
self.check(b, a)
def test_21(self):
b = "print h.iterkeys().next()"
a = "print iter(h.keys()).next()"
self.check(b, a)
def test_22(self):
b = "print h.keys()[0]"
a = "print list(h.keys())[0]"
self.check(b, a)
def test_23(self):
b = "print list(h.iterkeys().next())"
a = "print list(iter(h.keys()).next())"
self.check(b, a)
def test_24(self):
b = "for x in h.keys()[0]: print x"
a = "for x in list(h.keys())[0]: print x"
self.check(b, a)
def test_25(self):
b = "d.viewkeys()"
a = "d.keys()"
self.check(b, a)
def test_26(self):
b = "d.viewitems()"
a = "d.items()"
self.check(b, a)
def test_27(self):
b = "d.viewvalues()"
a = "d.values()"
self.check(b, a)
def test_28(self):
b = "[i for i in d.viewkeys()]"
a = "[i for i in d.keys()]"
self.check(b, a)
def test_29(self):
b = "(i for i in d.viewkeys())"
a = "(i for i in d.keys())"
self.check(b, a)
def test_30(self):
b = "iter(d.viewkeys())"
a = "iter(d.keys())"
self.check(b, a)
def test_31(self):
b = "list(d.viewkeys())"
a = "list(d.keys())"
self.check(b, a)
def test_32(self):
b = "sorted(d.viewkeys())"
a = "sorted(d.keys())"
self.check(b, a)
class Test_xrange(FixerTestCase):
fixer = "xrange"
def test_prefix_preservation(self):
b = """x = xrange( 10 )"""
a = """x = range( 10 )"""
self.check(b, a)
b = """x = xrange( 1 , 10 )"""
a = """x = range( 1 , 10 )"""
self.check(b, a)
b = """x = xrange( 0 , 10 , 2 )"""
a = """x = range( 0 , 10 , 2 )"""
self.check(b, a)
def test_single_arg(self):
b = """x = xrange(10)"""
a = """x = range(10)"""
self.check(b, a)
def test_two_args(self):
b = """x = xrange(1, 10)"""
a = """x = range(1, 10)"""
self.check(b, a)
def test_three_args(self):
b = """x = xrange(0, 10, 2)"""
a = """x = range(0, 10, 2)"""
self.check(b, a)
def test_wrap_in_list(self):
b = """x = range(10, 3, 9)"""
a = """x = list(range(10, 3, 9))"""
self.check(b, a)
b = """x = foo(range(10, 3, 9))"""
a = """x = foo(list(range(10, 3, 9)))"""
self.check(b, a)
b = """x = range(10, 3, 9) + [4]"""
a = """x = list(range(10, 3, 9)) + [4]"""
self.check(b, a)
b = """x = range(10)[::-1]"""
a = """x = list(range(10))[::-1]"""
self.check(b, a)
b = """x = range(10) [3]"""
a = """x = list(range(10)) [3]"""
self.check(b, a)
def test_xrange_in_for(self):
b = """for i in xrange(10):\n j=i"""
a = """for i in range(10):\n j=i"""
self.check(b, a)
b = """[i for i in xrange(10)]"""
a = """[i for i in range(10)]"""
self.check(b, a)
def test_range_in_for(self):
self.unchanged("for i in range(10): pass")
self.unchanged("[i for i in range(10)]")
def test_in_contains_test(self):
self.unchanged("x in range(10, 3, 9)")
def test_in_consuming_context(self):
for call in fixer_util.consuming_calls:
self.unchanged("a = %s(range(10))" % call)
class Test_xrange_with_reduce(FixerTestCase):
def setUp(self):
super(Test_xrange_with_reduce, self).setUp(["xrange", "reduce"])
def test_double_transform(self):
b = """reduce(x, xrange(5))"""
a = """from functools import reduce
reduce(x, range(5))"""
self.check(b, a)
class Test_raw_input(FixerTestCase):
fixer = "raw_input"
def test_prefix_preservation(self):
b = """x = raw_input( )"""
a = """x = input( )"""
self.check(b, a)
b = """x = raw_input( '' )"""
a = """x = input( '' )"""
self.check(b, a)
def test_1(self):
b = """x = raw_input()"""
a = """x = input()"""
self.check(b, a)
def test_2(self):
b = """x = raw_input('')"""
a = """x = input('')"""
self.check(b, a)
def test_3(self):
b = """x = raw_input('prompt')"""
a = """x = input('prompt')"""
self.check(b, a)
def test_4(self):
b = """x = raw_input(foo(a) + 6)"""
a = """x = input(foo(a) + 6)"""
self.check(b, a)
def test_5(self):
b = """x = raw_input(invite).split()"""
a = """x = input(invite).split()"""
self.check(b, a)
def test_6(self):
b = """x = raw_input(invite) . split ()"""
a = """x = input(invite) . split ()"""
self.check(b, a)
def test_8(self):
b = "x = int(raw_input())"
a = "x = int(input())"
self.check(b, a)
class Test_funcattrs(FixerTestCase):
fixer = "funcattrs"
attrs = ["closure", "doc", "name", "defaults", "code", "globals", "dict"]
def test(self):
for attr in self.attrs:
b = "a.func_%s" % attr
a = "a.__%s__" % attr
self.check(b, a)
b = "self.foo.func_%s.foo_bar" % attr
a = "self.foo.__%s__.foo_bar" % attr
self.check(b, a)
def test_unchanged(self):
for attr in self.attrs:
s = "foo(func_%s + 5)" % attr
self.unchanged(s)
s = "f(foo.__%s__)" % attr
self.unchanged(s)
s = "f(foo.__%s__.foo)" % attr
self.unchanged(s)
class Test_xreadlines(FixerTestCase):
fixer = "xreadlines"
def test_call(self):
b = "for x in f.xreadlines(): pass"
a = "for x in f: pass"
self.check(b, a)
b = "for x in foo().xreadlines(): pass"
a = "for x in foo(): pass"
self.check(b, a)
b = "for x in (5 + foo()).xreadlines(): pass"
a = "for x in (5 + foo()): pass"
self.check(b, a)
def test_attr_ref(self):
b = "foo(f.xreadlines + 5)"
a = "foo(f.__iter__ + 5)"
self.check(b, a)
b = "foo(f().xreadlines + 5)"
a = "foo(f().__iter__ + 5)"
self.check(b, a)
b = "foo((5 + f()).xreadlines + 5)"
a = "foo((5 + f()).__iter__ + 5)"
self.check(b, a)
def test_unchanged(self):
s = "for x in f.xreadlines(5): pass"
self.unchanged(s)
s = "for x in f.xreadlines(k=5): pass"
self.unchanged(s)
s = "for x in f.xreadlines(*k, **v): pass"
self.unchanged(s)
s = "foo(xreadlines)"
self.unchanged(s)
class ImportsFixerTests:
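    # Mixin shared by the module-renaming fixer tests; concrete subclasses
    # supply `modules`, a mapping of old (Python 2) names to their new homes.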
def test_import_module(self):
for old, new in self.modules.items():
b = "import %s" % old
a = "import %s" % new
self.check(b, a)
b = "import foo, %s, bar" % old
a = "import foo, %s, bar" % new
self.check(b, a)
def test_import_from(self):
for old, new in self.modules.items():
b = "from %s import foo" % old
a = "from %s import foo" % new
self.check(b, a)
b = "from %s import foo, bar" % old
a = "from %s import foo, bar" % new
self.check(b, a)
b = "from %s import (yes, no)" % old
a = "from %s import (yes, no)" % new
self.check(b, a)
def test_import_module_as(self):
for old, new in self.modules.items():
b = "import %s as foo_bar" % old
a = "import %s as foo_bar" % new
self.check(b, a)
b = "import %s as foo_bar" % old
a = "import %s as foo_bar" % new
self.check(b, a)
def test_import_from_as(self):
for old, new in self.modules.items():
b = "from %s import foo as bar" % old
a = "from %s import foo as bar" % new
self.check(b, a)
def test_star(self):
for old, new in self.modules.items():
b = "from %s import *" % old
a = "from %s import *" % new
self.check(b, a)
def test_import_module_usage(self):
for old, new in self.modules.items():
b = """
import %s
foo(%s.bar)
""" % (old, old)
a = """
import %s
foo(%s.bar)
""" % (new, new)
self.check(b, a)
b = """
from %s import x
%s = 23
""" % (old, old)
a = """
from %s import x
%s = 23
""" % (new, old)
self.check(b, a)
s = """
def f():
%s.method()
""" % (old,)
self.unchanged(s)
# test nested usage
b = """
import %s
%s.bar(%s.foo)
""" % (old, old, old)
a = """
import %s
%s.bar(%s.foo)
""" % (new, new, new)
self.check(b, a)
b = """
import %s
x.%s
""" % (old, old)
a = """
import %s
x.%s
""" % (new, old)
self.check(b, a)
class Test_imports(FixerTestCase, ImportsFixerTests):
fixer = "imports"
from ..fixes.fix_imports import MAPPING as modules
def test_multiple_imports(self):
b = """import urlparse, cStringIO"""
a = """import urllib.parse, io"""
self.check(b, a)
def test_multiple_imports_as(self):
b = """
import copy_reg as bar, HTMLParser as foo, urlparse
s = urlparse.spam(bar.foo())
"""
a = """
import copyreg as bar, html.parser as foo, urllib.parse
s = urllib.parse.spam(bar.foo())
"""
self.check(b, a)
class Test_imports2(FixerTestCase, ImportsFixerTests):
fixer = "imports2"
from ..fixes.fix_imports2 import MAPPING as modules
class Test_imports_fixer_order(FixerTestCase, ImportsFixerTests):
def setUp(self):
super(Test_imports_fixer_order, self).setUp(['imports', 'imports2'])
from ..fixes.fix_imports2 import MAPPING as mapping2
self.modules = mapping2.copy()
from ..fixes.fix_imports import MAPPING as mapping1
for key in ('dbhash', 'dumbdbm', 'dbm', 'gdbm'):
self.modules[key] = mapping1[key]
def test_after_local_imports_refactoring(self):
for fix in ("imports", "imports2"):
self.fixer = fix
self.assert_runs_after("import")
class Test_urllib(FixerTestCase):
fixer = "urllib"
from ..fixes.fix_urllib import MAPPING as modules
def test_import_module(self):
for old, changes in self.modules.items():
b = "import %s" % old
a = "import %s" % ", ".join(map(itemgetter(0), changes))
self.check(b, a)
def test_import_from(self):
for old, changes in self.modules.items():
all_members = []
for new, members in changes:
for member in members:
all_members.append(member)
b = "from %s import %s" % (old, member)
a = "from %s import %s" % (new, member)
self.check(b, a)
s = "from foo import %s" % member
self.unchanged(s)
b = "from %s import %s" % (old, ", ".join(members))
a = "from %s import %s" % (new, ", ".join(members))
self.check(b, a)
s = "from foo import %s" % ", ".join(members)
self.unchanged(s)
# test the breaking of a module into multiple replacements
b = "from %s import %s" % (old, ", ".join(all_members))
a = "\n".join(["from %s import %s" % (new, ", ".join(members))
for (new, members) in changes])
self.check(b, a)
def test_import_module_as(self):
for old in self.modules:
s = "import %s as foo" % old
self.warns_unchanged(s, "This module is now multiple modules")
def test_import_from_as(self):
for old, changes in self.modules.items():
for new, members in changes:
for member in members:
b = "from %s import %s as foo_bar" % (old, member)
a = "from %s import %s as foo_bar" % (new, member)
self.check(b, a)
b = "from %s import %s as blah, %s" % (old, member, member)
a = "from %s import %s as blah, %s" % (new, member, member)
self.check(b, a)
def test_star(self):
for old in self.modules:
s = "from %s import *" % old
self.warns_unchanged(s, "Cannot handle star imports")
def test_indented(self):
b = """
def foo():
from urllib import urlencode, urlopen
"""
a = """
def foo():
from urllib.parse import urlencode
from urllib.request import urlopen
"""
self.check(b, a)
b = """
def foo():
other()
from urllib import urlencode, urlopen
"""
a = """
def foo():
other()
from urllib.parse import urlencode
from urllib.request import urlopen
"""
self.check(b, a)
def test_import_module_usage(self):
for old, changes in self.modules.items():
for new, members in changes:
for member in members:
new_import = ", ".join([n for (n, mems)
in self.modules[old]])
b = """
import %s
foo(%s.%s)
""" % (old, old, member)
a = """
import %s
foo(%s.%s)
""" % (new_import, new, member)
self.check(b, a)
b = """
import %s
%s.%s(%s.%s)
""" % (old, old, member, old, member)
a = """
import %s
%s.%s(%s.%s)
""" % (new_import, new, member, new, member)
self.check(b, a)
class Test_input(FixerTestCase):
fixer = "input"
def test_prefix_preservation(self):
b = """x = input( )"""
a = """x = eval(input( ))"""
self.check(b, a)
b = """x = input( '' )"""
a = """x = eval(input( '' ))"""
self.check(b, a)
def test_trailing_comment(self):
b = """x = input() # foo"""
a = """x = eval(input()) # foo"""
self.check(b, a)
def test_idempotency(self):
s = """x = eval(input())"""
self.unchanged(s)
s = """x = eval(input(''))"""
self.unchanged(s)
s = """x = eval(input(foo(5) + 9))"""
self.unchanged(s)
def test_1(self):
b = """x = input()"""
a = """x = eval(input())"""
self.check(b, a)
def test_2(self):
b = """x = input('')"""
a = """x = eval(input(''))"""
self.check(b, a)
def test_3(self):
b = """x = input('prompt')"""
a = """x = eval(input('prompt'))"""
self.check(b, a)
def test_4(self):
b = """x = input(foo(5) + 9)"""
a = """x = eval(input(foo(5) + 9))"""
self.check(b, a)
class Test_tuple_params(FixerTestCase):
fixer = "tuple_params"
def test_unchanged_1(self):
s = """def foo(): pass"""
self.unchanged(s)
def test_unchanged_2(self):
s = """def foo(a, b, c): pass"""
self.unchanged(s)
def test_unchanged_3(self):
s = """def foo(a=3, b=4, c=5): pass"""
self.unchanged(s)
def test_1(self):
b = """
def foo(((a, b), c)):
x = 5"""
a = """
def foo(xxx_todo_changeme):
((a, b), c) = xxx_todo_changeme
x = 5"""
self.check(b, a)
def test_2(self):
b = """
def foo(((a, b), c), d):
x = 5"""
a = """
def foo(xxx_todo_changeme, d):
((a, b), c) = xxx_todo_changeme
x = 5"""
self.check(b, a)
def test_3(self):
b = """
def foo(((a, b), c), d) -> e:
x = 5"""
a = """
def foo(xxx_todo_changeme, d) -> e:
((a, b), c) = xxx_todo_changeme
x = 5"""
self.check(b, a)
def test_semicolon(self):
b = """
def foo(((a, b), c)): x = 5; y = 7"""
a = """
def foo(xxx_todo_changeme): ((a, b), c) = xxx_todo_changeme; x = 5; y = 7"""
self.check(b, a)
def test_keywords(self):
b = """
def foo(((a, b), c), d, e=5) -> z:
x = 5"""
a = """
def foo(xxx_todo_changeme, d, e=5) -> z:
((a, b), c) = xxx_todo_changeme
x = 5"""
self.check(b, a)
def test_varargs(self):
b = """
def foo(((a, b), c), d, *vargs, **kwargs) -> z:
x = 5"""
a = """
def foo(xxx_todo_changeme, d, *vargs, **kwargs) -> z:
((a, b), c) = xxx_todo_changeme
x = 5"""
self.check(b, a)
def test_multi_1(self):
b = """
def foo(((a, b), c), (d, e, f)) -> z:
x = 5"""
a = """
def foo(xxx_todo_changeme, xxx_todo_changeme1) -> z:
((a, b), c) = xxx_todo_changeme
(d, e, f) = xxx_todo_changeme1
x = 5"""
self.check(b, a)
def test_multi_2(self):
b = """
def foo(x, ((a, b), c), d, (e, f, g), y) -> z:
x = 5"""
a = """
def foo(x, xxx_todo_changeme, d, xxx_todo_changeme1, y) -> z:
((a, b), c) = xxx_todo_changeme
(e, f, g) = xxx_todo_changeme1
x = 5"""
self.check(b, a)
def test_docstring(self):
b = """
def foo(((a, b), c), (d, e, f)) -> z:
"foo foo foo foo"
x = 5"""
a = """
def foo(xxx_todo_changeme, xxx_todo_changeme1) -> z:
"foo foo foo foo"
((a, b), c) = xxx_todo_changeme
(d, e, f) = xxx_todo_changeme1
x = 5"""
self.check(b, a)
def test_lambda_no_change(self):
s = """lambda x: x + 5"""
self.unchanged(s)
def test_lambda_parens_single_arg(self):
b = """lambda (x): x + 5"""
a = """lambda x: x + 5"""
self.check(b, a)
b = """lambda(x): x + 5"""
a = """lambda x: x + 5"""
self.check(b, a)
b = """lambda ((((x)))): x + 5"""
a = """lambda x: x + 5"""
self.check(b, a)
b = """lambda((((x)))): x + 5"""
a = """lambda x: x + 5"""
self.check(b, a)
def test_lambda_simple(self):
b = """lambda (x, y): x + f(y)"""
a = """lambda x_y: x_y[0] + f(x_y[1])"""
self.check(b, a)
b = """lambda(x, y): x + f(y)"""
a = """lambda x_y: x_y[0] + f(x_y[1])"""
self.check(b, a)
b = """lambda (((x, y))): x + f(y)"""
a = """lambda x_y: x_y[0] + f(x_y[1])"""
self.check(b, a)
b = """lambda(((x, y))): x + f(y)"""
a = """lambda x_y: x_y[0] + f(x_y[1])"""
self.check(b, a)
def test_lambda_one_tuple(self):
b = """lambda (x,): x + f(x)"""
a = """lambda x1: x1[0] + f(x1[0])"""
self.check(b, a)
b = """lambda (((x,))): x + f(x)"""
a = """lambda x1: x1[0] + f(x1[0])"""
self.check(b, a)
def test_lambda_simple_multi_use(self):
b = """lambda (x, y): x + x + f(x) + x"""
a = """lambda x_y: x_y[0] + x_y[0] + f(x_y[0]) + x_y[0]"""
self.check(b, a)
def test_lambda_simple_reverse(self):
b = """lambda (x, y): y + x"""
a = """lambda x_y: x_y[1] + x_y[0]"""
self.check(b, a)
def test_lambda_nested(self):
b = """lambda (x, (y, z)): x + y + z"""
a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + x_y_z[1][1]"""
self.check(b, a)
b = """lambda (((x, (y, z)))): x + y + z"""
a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + x_y_z[1][1]"""
self.check(b, a)
def test_lambda_nested_multi_use(self):
b = """lambda (x, (y, z)): x + y + f(y)"""
a = """lambda x_y_z: x_y_z[0] + x_y_z[1][0] + f(x_y_z[1][0])"""
self.check(b, a)
class Test_methodattrs(FixerTestCase):
fixer = "methodattrs"
attrs = ["func", "self", "class"]
def test(self):
for attr in self.attrs:
b = "a.im_%s" % attr
if attr == "class":
a = "a.__self__.__class__"
else:
a = "a.__%s__" % attr
self.check(b, a)
b = "self.foo.im_%s.foo_bar" % attr
if attr == "class":
a = "self.foo.__self__.__class__.foo_bar"
else:
a = "self.foo.__%s__.foo_bar" % attr
self.check(b, a)
def test_unchanged(self):
for attr in self.attrs:
s = "foo(im_%s + 5)" % attr
self.unchanged(s)
s = "f(foo.__%s__)" % attr
self.unchanged(s)
s = "f(foo.__%s__.foo)" % attr
self.unchanged(s)
class Test_next(FixerTestCase):
fixer = "next"
def test_1(self):
b = """it.next()"""
a = """next(it)"""
self.check(b, a)
def test_2(self):
b = """a.b.c.d.next()"""
a = """next(a.b.c.d)"""
self.check(b, a)
def test_3(self):
b = """(a + b).next()"""
a = """next((a + b))"""
self.check(b, a)
def test_4(self):
b = """a().next()"""
a = """next(a())"""
self.check(b, a)
def test_5(self):
b = """a().next() + b"""
a = """next(a()) + b"""
self.check(b, a)
def test_6(self):
b = """c( a().next() + b)"""
a = """c( next(a()) + b)"""
self.check(b, a)
def test_prefix_preservation_1(self):
b = """
for a in b:
foo(a)
a.next()
"""
a = """
for a in b:
foo(a)
next(a)
"""
self.check(b, a)
def test_prefix_preservation_2(self):
b = """
for a in b:
foo(a) # abc
# def
a.next()
"""
a = """
for a in b:
foo(a) # abc
# def
next(a)
"""
self.check(b, a)
def test_prefix_preservation_3(self):
b = """
next = 5
for a in b:
foo(a)
a.next()
"""
a = """
next = 5
for a in b:
foo(a)
a.__next__()
"""
self.check(b, a, ignore_warnings=True)
def test_prefix_preservation_4(self):
b = """
next = 5
for a in b:
foo(a) # abc
# def
a.next()
"""
a = """
next = 5
for a in b:
foo(a) # abc
# def
a.__next__()
"""
self.check(b, a, ignore_warnings=True)
def test_prefix_preservation_5(self):
b = """
next = 5
for a in b:
foo(foo(a), # abc
a.next())
"""
a = """
next = 5
for a in b:
foo(foo(a), # abc
a.__next__())
"""
self.check(b, a, ignore_warnings=True)
def test_prefix_preservation_6(self):
b = """
for a in b:
foo(foo(a), # abc
a.next())
"""
a = """
for a in b:
foo(foo(a), # abc
next(a))
"""
self.check(b, a)
def test_method_1(self):
b = """
class A:
def next(self):
pass
"""
a = """
class A:
def __next__(self):
pass
"""
self.check(b, a)
def test_method_2(self):
b = """
class A(object):
def next(self):
pass
"""
a = """
class A(object):
def __next__(self):
pass
"""
self.check(b, a)
def test_method_3(self):
b = """
class A:
def next(x):
pass
"""
a = """
class A:
def __next__(x):
pass
"""
self.check(b, a)
def test_method_4(self):
b = """
class A:
def __init__(self, foo):
self.foo = foo
def next(self):
pass
def __iter__(self):
return self
"""
a = """
class A:
def __init__(self, foo):
self.foo = foo
def __next__(self):
pass
def __iter__(self):
return self
"""
self.check(b, a)
def test_method_unchanged(self):
s = """
class A:
def next(self, a, b):
pass
"""
self.unchanged(s)
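    # In the shadowing cases below the fixer cannot safely rewrite next():
    # a binding in scope may shadow the builtin, so it only emits a warning.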
def test_shadowing_assign_simple(self):
s = """
next = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_assign_tuple_1(self):
s = """
(next, a) = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_assign_tuple_2(self):
s = """
(a, (b, (next, c)), a) = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_assign_list_1(self):
s = """
[next, a] = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_assign_list_2(self):
s = """
[a, [b, [next, c]], a] = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_builtin_assign(self):
s = """
def foo():
__builtin__.next = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_builtin_assign_in_tuple(self):
s = """
def foo():
(a, __builtin__.next) = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_builtin_assign_in_list(self):
s = """
def foo():
[a, __builtin__.next] = foo
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_assign_to_next(self):
s = """
def foo():
A.next = foo
class A:
def next(self, a, b):
pass
"""
self.unchanged(s)
def test_assign_to_next_in_tuple(self):
s = """
def foo():
(a, A.next) = foo
class A:
def next(self, a, b):
pass
"""
self.unchanged(s)
def test_assign_to_next_in_list(self):
s = """
def foo():
[a, A.next] = foo
class A:
def next(self, a, b):
pass
"""
self.unchanged(s)
def test_shadowing_import_1(self):
s = """
import foo.bar as next
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_import_2(self):
s = """
import bar, bar.foo as next
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_import_3(self):
s = """
import bar, bar.foo as next, baz
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_import_from_1(self):
s = """
from x import next
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_import_from_2(self):
s = """
from x.a import next
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_import_from_3(self):
s = """
from x import a, next, b
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_import_from_4(self):
s = """
from x.a import a, next, b
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_funcdef_1(self):
s = """
def next(a):
pass
class A:
def next(self, a, b):
pass
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_funcdef_2(self):
b = """
def next(a):
pass
class A:
def next(self):
pass
it.next()
"""
a = """
def next(a):
pass
class A:
def __next__(self):
pass
it.__next__()
"""
self.warns(b, a, "Calls to builtin next() possibly shadowed")
def test_shadowing_global_1(self):
s = """
def f():
global next
next = 5
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_global_2(self):
s = """
def f():
global a, next, b
next = 5
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_for_simple(self):
s = """
for next in it():
pass
b = 5
c = 6
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_for_tuple_1(self):
s = """
for next, b in it():
pass
b = 5
c = 6
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_shadowing_for_tuple_2(self):
s = """
for a, (next, c), b in it():
pass
b = 5
c = 6
"""
self.warns_unchanged(s, "Calls to builtin next() possibly shadowed")
def test_noncall_access_1(self):
b = """gnext = g.next"""
a = """gnext = g.__next__"""
self.check(b, a)
def test_noncall_access_2(self):
b = """f(g.next + 5)"""
a = """f(g.__next__ + 5)"""
self.check(b, a)
def test_noncall_access_3(self):
b = """f(g().next + 5)"""
a = """f(g().__next__ + 5)"""
self.check(b, a)
class Test_nonzero(FixerTestCase):
fixer = "nonzero"
def test_1(self):
b = """
class A:
def __nonzero__(self):
pass
"""
a = """
class A:
def __bool__(self):
pass
"""
self.check(b, a)
def test_2(self):
b = """
class A(object):
def __nonzero__(self):
pass
"""
a = """
class A(object):
def __bool__(self):
pass
"""
self.check(b, a)
def test_unchanged_1(self):
s = """
class A(object):
def __bool__(self):
pass
"""
self.unchanged(s)
def test_unchanged_2(self):
s = """
class A(object):
def __nonzero__(self, a):
pass
"""
self.unchanged(s)
def test_unchanged_func(self):
s = """
def __nonzero__(self):
pass
"""
self.unchanged(s)
class Test_numliterals(FixerTestCase):
fixer = "numliterals"
def test_octal_1(self):
b = """0755"""
a = """0o755"""
self.check(b, a)
def test_long_int_1(self):
b = """a = 12L"""
a = """a = 12"""
self.check(b, a)
def test_long_int_2(self):
b = """a = 12l"""
a = """a = 12"""
self.check(b, a)
def test_long_hex(self):
b = """b = 0x12l"""
a = """b = 0x12"""
self.check(b, a)
def test_comments_and_spacing(self):
b = """b = 0x12L"""
a = """b = 0x12"""
self.check(b, a)
b = """b = 0755 # spam"""
a = """b = 0o755 # spam"""
self.check(b, a)
def test_unchanged_int(self):
s = """5"""
self.unchanged(s)
def test_unchanged_float(self):
s = """5.0"""
self.unchanged(s)
def test_unchanged_octal(self):
s = """0o755"""
self.unchanged(s)
def test_unchanged_hex(self):
s = """0xABC"""
self.unchanged(s)
def test_unchanged_exp(self):
s = """5.0e10"""
self.unchanged(s)
def test_unchanged_complex_int(self):
s = """5 + 4j"""
self.unchanged(s)
def test_unchanged_complex_float(self):
s = """5.4 + 4.9j"""
self.unchanged(s)
def test_unchanged_complex_bare(self):
s = """4j"""
self.unchanged(s)
s = """4.4j"""
self.unchanged(s)
class Test_renames(FixerTestCase):
fixer = "renames"
modules = {"sys": ("maxint", "maxsize"),
}
def test_import_from(self):
for mod, (old, new) in self.modules.items():
b = "from %s import %s" % (mod, old)
a = "from %s import %s" % (mod, new)
self.check(b, a)
s = "from foo import %s" % old
self.unchanged(s)
def test_import_from_as(self):
for mod, (old, new) in self.modules.items():
b = "from %s import %s as foo_bar" % (mod, old)
a = "from %s import %s as foo_bar" % (mod, new)
self.check(b, a)
def test_import_module_usage(self):
for mod, (old, new) in self.modules.items():
b = """
import %s
foo(%s, %s.%s)
""" % (mod, mod, mod, old)
a = """
import %s
foo(%s, %s.%s)
""" % (mod, mod, mod, new)
self.check(b, a)
def XXX_test_from_import_usage(self):
# not implemented yet
for mod, (old, new) in self.modules.items():
b = """
from %s import %s
foo(%s, %s)
""" % (mod, old, mod, old)
a = """
from %s import %s
foo(%s, %s)
""" % (mod, new, mod, new)
self.check(b, a)
class Test_unicode(FixerTestCase):
fixer = "unicode"
def test_whitespace(self):
b = """unicode( x)"""
a = """str( x)"""
self.check(b, a)
b = """ unicode(x )"""
a = """ str(x )"""
self.check(b, a)
b = """ u'h'"""
a = """ 'h'"""
self.check(b, a)
def test_unicode_call(self):
b = """unicode(x, y, z)"""
a = """str(x, y, z)"""
self.check(b, a)
def test_unichr(self):
b = """unichr(u'h')"""
a = """chr('h')"""
self.check(b, a)
def test_unicode_literal_1(self):
b = '''u"x"'''
a = '''"x"'''
self.check(b, a)
def test_unicode_literal_2(self):
b = """ur'x'"""
a = """r'x'"""
self.check(b, a)
def test_unicode_literal_3(self):
b = """UR'''x''' """
a = """R'''x''' """
self.check(b, a)
def test_native_literal_escape_u(self):
b = """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
a = """'\\\\\\\\u20ac\\\\U0001d121\\\\u20ac'"""
self.check(b, a)
b = """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
a = """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
def test_bytes_literal_escape_u(self):
b = """b'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
a = """b'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
b = """br'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
a = """br'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
def test_unicode_literal_escape_u(self):
b = """u'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
a = """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
b = """ur'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
a = """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
def test_native_unicode_literal_escape_u(self):
f = 'from __future__ import unicode_literals\n'
b = f + """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
a = f + """'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
b = f + """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
a = f + """r'\\\\\\u20ac\\U0001d121\\\\u20ac'"""
self.check(b, a)
class Test_callable(FixerTestCase):
fixer = "callable"
def test_prefix_preservation(self):
b = """callable( x)"""
a = """import collections\nisinstance( x, collections.Callable)"""
self.check(b, a)
b = """if callable(x): pass"""
a = """import collections
if isinstance(x, collections.Callable): pass"""
self.check(b, a)
def test_callable_call(self):
b = """callable(x)"""
a = """import collections\nisinstance(x, collections.Callable)"""
self.check(b, a)
def test_global_import(self):
b = """
def spam(foo):
callable(foo)"""[1:]
a = """
import collections
def spam(foo):
isinstance(foo, collections.Callable)"""[1:]
self.check(b, a)
b = """
import collections
def spam(foo):
callable(foo)"""[1:]
# same output if it was already imported
self.check(b, a)
b = """
from collections import *
def spam(foo):
callable(foo)"""[1:]
a = """
from collections import *
import collections
def spam(foo):
isinstance(foo, collections.Callable)"""[1:]
self.check(b, a)
b = """
do_stuff()
do_some_other_stuff()
assert callable(do_stuff)"""[1:]
a = """
import collections
do_stuff()
do_some_other_stuff()
assert isinstance(do_stuff, collections.Callable)"""[1:]
self.check(b, a)
b = """
if isinstance(do_stuff, Callable):
assert callable(do_stuff)
do_stuff(do_stuff)
if not callable(do_stuff):
exit(1)
else:
assert callable(do_stuff)
else:
assert not callable(do_stuff)"""[1:]
a = """
import collections
if isinstance(do_stuff, Callable):
assert isinstance(do_stuff, collections.Callable)
do_stuff(do_stuff)
if not isinstance(do_stuff, collections.Callable):
exit(1)
else:
assert isinstance(do_stuff, collections.Callable)
else:
assert not isinstance(do_stuff, collections.Callable)"""[1:]
self.check(b, a)
def test_callable_should_not_change(self):
a = """callable(*x)"""
self.unchanged(a)
a = """callable(x, y)"""
self.unchanged(a)
a = """callable(x, kw=y)"""
self.unchanged(a)
a = """callable()"""
self.unchanged(a)
class Test_filter(FixerTestCase):
fixer = "filter"
def test_prefix_preservation(self):
b = """x = filter( foo, 'abc' )"""
a = """x = list(filter( foo, 'abc' ))"""
self.check(b, a)
b = """x = filter( None , 'abc' )"""
a = """x = [_f for _f in 'abc' if _f]"""
self.check(b, a)
def test_filter_basic(self):
b = """x = filter(None, 'abc')"""
a = """x = [_f for _f in 'abc' if _f]"""
self.check(b, a)
b = """x = len(filter(f, 'abc'))"""
a = """x = len(list(filter(f, 'abc')))"""
self.check(b, a)
b = """x = filter(lambda x: x%2 == 0, range(10))"""
a = """x = [x for x in range(10) if x%2 == 0]"""
self.check(b, a)
# Note the parens around x
b = """x = filter(lambda (x): x%2 == 0, range(10))"""
a = """x = [x for x in range(10) if x%2 == 0]"""
self.check(b, a)
# XXX This (rare) case is not supported
## b = """x = filter(f, 'abc')[0]"""
## a = """x = list(filter(f, 'abc'))[0]"""
## self.check(b, a)
def test_filter_nochange(self):
a = """b.join(filter(f, 'abc'))"""
self.unchanged(a)
a = """(a + foo(5)).join(filter(f, 'abc'))"""
self.unchanged(a)
a = """iter(filter(f, 'abc'))"""
self.unchanged(a)
a = """list(filter(f, 'abc'))"""
self.unchanged(a)
a = """list(filter(f, 'abc'))[0]"""
self.unchanged(a)
a = """set(filter(f, 'abc'))"""
self.unchanged(a)
a = """set(filter(f, 'abc')).pop()"""
self.unchanged(a)
a = """tuple(filter(f, 'abc'))"""
self.unchanged(a)
a = """any(filter(f, 'abc'))"""
self.unchanged(a)
a = """all(filter(f, 'abc'))"""
self.unchanged(a)
a = """sum(filter(f, 'abc'))"""
self.unchanged(a)
a = """sorted(filter(f, 'abc'))"""
self.unchanged(a)
a = """sorted(filter(f, 'abc'), key=blah)"""
self.unchanged(a)
a = """sorted(filter(f, 'abc'), key=blah)[0]"""
self.unchanged(a)
a = """enumerate(filter(f, 'abc'))"""
self.unchanged(a)
a = """enumerate(filter(f, 'abc'), start=1)"""
self.unchanged(a)
a = """for i in filter(f, 'abc'): pass"""
self.unchanged(a)
a = """[x for x in filter(f, 'abc')]"""
self.unchanged(a)
a = """(x for x in filter(f, 'abc'))"""
self.unchanged(a)
def test_future_builtins(self):
a = "from future_builtins import spam, filter; filter(f, 'ham')"
self.unchanged(a)
b = """from future_builtins import spam; x = filter(f, 'abc')"""
a = """from future_builtins import spam; x = list(filter(f, 'abc'))"""
self.check(b, a)
a = "from future_builtins import *; filter(f, 'ham')"
self.unchanged(a)
class Test_map(FixerTestCase):
fixer = "map"
def check(self, b, a):
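        # First confirm the fixer leaves the call alone once future_builtins.map
        # has been imported, then run the ordinary before/after comparison.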
self.unchanged("from future_builtins import map; " + b, a)
super(Test_map, self).check(b, a)
def test_prefix_preservation(self):
b = """x = map( f, 'abc' )"""
a = """x = list(map( f, 'abc' ))"""
self.check(b, a)
def test_trailing_comment(self):
b = """x = map(f, 'abc') # foo"""
a = """x = list(map(f, 'abc')) # foo"""
self.check(b, a)
def test_None_with_multiple_arguments(self):
s = """x = map(None, a, b, c)"""
self.warns_unchanged(s, "cannot convert map(None, ...) with "
"multiple arguments")
def test_map_basic(self):
b = """x = map(f, 'abc')"""
a = """x = list(map(f, 'abc'))"""
self.check(b, a)
b = """x = len(map(f, 'abc', 'def'))"""
a = """x = len(list(map(f, 'abc', 'def')))"""
self.check(b, a)
b = """x = map(None, 'abc')"""
a = """x = list('abc')"""
self.check(b, a)
b = """x = map(lambda x: x+1, range(4))"""
a = """x = [x+1 for x in range(4)]"""
self.check(b, a)
# Note the parens around x
b = """x = map(lambda (x): x+1, range(4))"""
a = """x = [x+1 for x in range(4)]"""
self.check(b, a)
b = """
foo()
# foo
map(f, x)
"""
a = """
foo()
# foo
list(map(f, x))
"""
self.warns(b, a, "You should use a for loop here")
# XXX This (rare) case is not supported
## b = """x = map(f, 'abc')[0]"""
## a = """x = list(map(f, 'abc'))[0]"""
## self.check(b, a)
def test_map_nochange(self):
a = """b.join(map(f, 'abc'))"""
self.unchanged(a)
a = """(a + foo(5)).join(map(f, 'abc'))"""
self.unchanged(a)
a = """iter(map(f, 'abc'))"""
self.unchanged(a)
a = """list(map(f, 'abc'))"""
self.unchanged(a)
a = """list(map(f, 'abc'))[0]"""
self.unchanged(a)
a = """set(map(f, 'abc'))"""
self.unchanged(a)
a = """set(map(f, 'abc')).pop()"""
self.unchanged(a)
a = """tuple(map(f, 'abc'))"""
self.unchanged(a)
a = """any(map(f, 'abc'))"""
self.unchanged(a)
a = """all(map(f, 'abc'))"""
self.unchanged(a)
a = """sum(map(f, 'abc'))"""
self.unchanged(a)
a = """sorted(map(f, 'abc'))"""
self.unchanged(a)
a = """sorted(map(f, 'abc'), key=blah)"""
self.unchanged(a)
a = """sorted(map(f, 'abc'), key=blah)[0]"""
self.unchanged(a)
a = """enumerate(map(f, 'abc'))"""
self.unchanged(a)
a = """enumerate(map(f, 'abc'), start=1)"""
self.unchanged(a)
a = """for i in map(f, 'abc'): pass"""
self.unchanged(a)
a = """[x for x in map(f, 'abc')]"""
self.unchanged(a)
a = """(x for x in map(f, 'abc'))"""
self.unchanged(a)
def test_future_builtins(self):
a = "from future_builtins import spam, map, eggs; map(f, 'ham')"
self.unchanged(a)
b = """from future_builtins import spam, eggs; x = map(f, 'abc')"""
a = """from future_builtins import spam, eggs; x = list(map(f, 'abc'))"""
self.check(b, a)
a = "from future_builtins import *; map(f, 'ham')"
self.unchanged(a)
class Test_zip(FixerTestCase):
fixer = "zip"
def check(self, b, a):
self.unchanged("from future_builtins import zip; " + b, a)
super(Test_zip, self).check(b, a)
def test_zip_basic(self):
b = """x = zip(a, b, c)"""
a = """x = list(zip(a, b, c))"""
self.check(b, a)
b = """x = len(zip(a, b))"""
a = """x = len(list(zip(a, b)))"""
self.check(b, a)
def test_zip_nochange(self):
a = """b.join(zip(a, b))"""
self.unchanged(a)
a = """(a + foo(5)).join(zip(a, b))"""
self.unchanged(a)
a = """iter(zip(a, b))"""
self.unchanged(a)
a = """list(zip(a, b))"""
self.unchanged(a)
a = """list(zip(a, b))[0]"""
self.unchanged(a)
a = """set(zip(a, b))"""
self.unchanged(a)
a = """set(zip(a, b)).pop()"""
self.unchanged(a)
a = """tuple(zip(a, b))"""
self.unchanged(a)
a = """any(zip(a, b))"""
self.unchanged(a)
a = """all(zip(a, b))"""
self.unchanged(a)
a = """sum(zip(a, b))"""
self.unchanged(a)
a = """sorted(zip(a, b))"""
self.unchanged(a)
a = """sorted(zip(a, b), key=blah)"""
self.unchanged(a)
a = """sorted(zip(a, b), key=blah)[0]"""
self.unchanged(a)
a = """enumerate(zip(a, b))"""
self.unchanged(a)
a = """enumerate(zip(a, b), start=1)"""
self.unchanged(a)
a = """for i in zip(a, b): pass"""
self.unchanged(a)
a = """[x for x in zip(a, b)]"""
self.unchanged(a)
a = """(x for x in zip(a, b))"""
self.unchanged(a)
def test_future_builtins(self):
a = "from future_builtins import spam, zip, eggs; zip(a, b)"
self.unchanged(a)
b = """from future_builtins import spam, eggs; x = zip(a, b)"""
a = """from future_builtins import spam, eggs; x = list(zip(a, b))"""
self.check(b, a)
a = "from future_builtins import *; zip(a, b)"
self.unchanged(a)
class Test_standarderror(FixerTestCase):
fixer = "standarderror"
def test(self):
b = """x = StandardError()"""
a = """x = Exception()"""
self.check(b, a)
b = """x = StandardError(a, b, c)"""
a = """x = Exception(a, b, c)"""
self.check(b, a)
b = """f(2 + StandardError(a, b, c))"""
a = """f(2 + Exception(a, b, c))"""
self.check(b, a)
class Test_types(FixerTestCase):
fixer = "types"
def test_basic_types_convert(self):
b = """types.StringType"""
a = """bytes"""
self.check(b, a)
b = """types.DictType"""
a = """dict"""
self.check(b, a)
b = """types . IntType"""
a = """int"""
self.check(b, a)
b = """types.ListType"""
a = """list"""
self.check(b, a)
b = """types.LongType"""
a = """int"""
self.check(b, a)
b = """types.NoneType"""
a = """type(None)"""
self.check(b, a)
class Test_idioms(FixerTestCase):
fixer = "idioms"
def test_while(self):
b = """while 1: foo()"""
a = """while True: foo()"""
self.check(b, a)
b = """while 1: foo()"""
a = """while True: foo()"""
self.check(b, a)
b = """
while 1:
foo()
"""
a = """
while True:
foo()
"""
self.check(b, a)
def test_while_unchanged(self):
s = """while 11: foo()"""
self.unchanged(s)
s = """while 0: foo()"""
self.unchanged(s)
s = """while foo(): foo()"""
self.unchanged(s)
s = """while []: foo()"""
self.unchanged(s)
def test_eq_simple(self):
b = """type(x) == T"""
a = """isinstance(x, T)"""
self.check(b, a)
b = """if type(x) == T: pass"""
a = """if isinstance(x, T): pass"""
self.check(b, a)
def test_eq_reverse(self):
b = """T == type(x)"""
a = """isinstance(x, T)"""
self.check(b, a)
b = """if T == type(x): pass"""
a = """if isinstance(x, T): pass"""
self.check(b, a)
def test_eq_expression(self):
b = """type(x+y) == d.get('T')"""
a = """isinstance(x+y, d.get('T'))"""
self.check(b, a)
b = """type( x + y) == d.get('T')"""
a = """isinstance(x + y, d.get('T'))"""
self.check(b, a)
def test_is_simple(self):
b = """type(x) is T"""
a = """isinstance(x, T)"""
self.check(b, a)
b = """if type(x) is T: pass"""
a = """if isinstance(x, T): pass"""
self.check(b, a)
def test_is_reverse(self):
b = """T is type(x)"""
a = """isinstance(x, T)"""
self.check(b, a)
b = """if T is type(x): pass"""
a = """if isinstance(x, T): pass"""
self.check(b, a)
def test_is_expression(self):
b = """type(x+y) is d.get('T')"""
a = """isinstance(x+y, d.get('T'))"""
self.check(b, a)
b = """type( x + y) is d.get('T')"""
a = """isinstance(x + y, d.get('T'))"""
self.check(b, a)
def test_is_not_simple(self):
b = """type(x) is not T"""
a = """not isinstance(x, T)"""
self.check(b, a)
b = """if type(x) is not T: pass"""
a = """if not isinstance(x, T): pass"""
self.check(b, a)
def test_is_not_reverse(self):
b = """T is not type(x)"""
a = """not isinstance(x, T)"""
self.check(b, a)
b = """if T is not type(x): pass"""
a = """if not isinstance(x, T): pass"""
self.check(b, a)
def test_is_not_expression(self):
b = """type(x+y) is not d.get('T')"""
a = """not isinstance(x+y, d.get('T'))"""
self.check(b, a)
b = """type( x + y) is not d.get('T')"""
a = """not isinstance(x + y, d.get('T'))"""
self.check(b, a)
def test_ne_simple(self):
b = """type(x) != T"""
a = """not isinstance(x, T)"""
self.check(b, a)
b = """if type(x) != T: pass"""
a = """if not isinstance(x, T): pass"""
self.check(b, a)
def test_ne_reverse(self):
b = """T != type(x)"""
a = """not isinstance(x, T)"""
self.check(b, a)
b = """if T != type(x): pass"""
a = """if not isinstance(x, T): pass"""
self.check(b, a)
def test_ne_expression(self):
b = """type(x+y) != d.get('T')"""
a = """not isinstance(x+y, d.get('T'))"""
self.check(b, a)
b = """type( x + y) != d.get('T')"""
a = """not isinstance(x + y, d.get('T'))"""
self.check(b, a)
def test_type_unchanged(self):
a = """type(x).__name__"""
self.unchanged(a)
def test_sort_list_call(self):
b = """
v = list(t)
v.sort()
foo(v)
"""
a = """
v = sorted(t)
foo(v)
"""
self.check(b, a)
b = """
v = list(foo(b) + d)
v.sort()
foo(v)
"""
a = """
v = sorted(foo(b) + d)
foo(v)
"""
self.check(b, a)
b = """
while x:
v = list(t)
v.sort()
foo(v)
"""
a = """
while x:
v = sorted(t)
foo(v)
"""
self.check(b, a)
b = """
v = list(t)
# foo
v.sort()
foo(v)
"""
a = """
v = sorted(t)
# foo
foo(v)
"""
self.check(b, a)
b = r"""
v = list( t)
v.sort()
foo(v)
"""
a = r"""
v = sorted( t)
foo(v)
"""
self.check(b, a)
b = r"""
try:
m = list(s)
m.sort()
except: pass
"""
a = r"""
try:
m = sorted(s)
except: pass
"""
self.check(b, a)
b = r"""
try:
m = list(s)
# foo
m.sort()
except: pass
"""
a = r"""
try:
m = sorted(s)
# foo
except: pass
"""
self.check(b, a)
b = r"""
m = list(s)
# more comments
m.sort()"""
a = r"""
m = sorted(s)
# more comments"""
self.check(b, a)
def test_sort_simple_expr(self):
b = """
v = t
v.sort()
foo(v)
"""
a = """
v = sorted(t)
foo(v)
"""
self.check(b, a)
b = """
v = foo(b)
v.sort()
foo(v)
"""
a = """
v = sorted(foo(b))
foo(v)
"""
self.check(b, a)
b = """
v = b.keys()
v.sort()
foo(v)
"""
a = """
v = sorted(b.keys())
foo(v)
"""
self.check(b, a)
b = """
v = foo(b) + d
v.sort()
foo(v)
"""
a = """
v = sorted(foo(b) + d)
foo(v)
"""
self.check(b, a)
b = """
while x:
v = t
v.sort()
foo(v)
"""
a = """
while x:
v = sorted(t)
foo(v)
"""
self.check(b, a)
b = """
v = t
# foo
v.sort()
foo(v)
"""
a = """
v = sorted(t)
# foo
foo(v)
"""
self.check(b, a)
b = r"""
v = t
v.sort()
foo(v)
"""
a = r"""
v = sorted(t)
foo(v)
"""
self.check(b, a)
def test_sort_unchanged(self):
s = """
v = list(t)
w.sort()
foo(w)
"""
self.unchanged(s)
s = """
v = list(t)
v.sort(u)
foo(v)
"""
self.unchanged(s)
class Test_basestring(FixerTestCase):
fixer = "basestring"
def test_basestring(self):
b = """isinstance(x, basestring)"""
a = """isinstance(x, str)"""
self.check(b, a)
class Test_buffer(FixerTestCase):
fixer = "buffer"
def test_buffer(self):
b = """x = buffer(y)"""
a = """x = memoryview(y)"""
self.check(b, a)
def test_slicing(self):
b = """buffer(y)[4:5]"""
a = """memoryview(y)[4:5]"""
self.check(b, a)
class Test_future(FixerTestCase):
fixer = "future"
def test_future(self):
b = """from __future__ import braces"""
a = """"""
self.check(b, a)
b = """# comment\nfrom __future__ import braces"""
a = """# comment\n"""
self.check(b, a)
b = """from __future__ import braces\n# comment"""
a = """\n# comment"""
self.check(b, a)
def test_run_order(self):
self.assert_runs_after('print')
class Test_itertools(FixerTestCase):
fixer = "itertools"
def checkall(self, before, after):
# Because we need to check with and without the itertools prefix
# and on each of the three functions, these loops make it all
# much easier
for i in ('itertools.', ''):
for f in ('map', 'filter', 'zip'):
                b = before % (i + 'i' + f)
                a = after % (f,)
self.check(b, a)
def test_0(self):
# A simple example -- test_1 covers exactly the same thing,
# but it's not quite as clear.
b = "itertools.izip(a, b)"
a = "zip(a, b)"
self.check(b, a)
def test_1(self):
b = """%s(f, a)"""
a = """%s(f, a)"""
self.checkall(b, a)
def test_qualified(self):
b = """itertools.ifilterfalse(a, b)"""
a = """itertools.filterfalse(a, b)"""
self.check(b, a)
b = """itertools.izip_longest(a, b)"""
a = """itertools.zip_longest(a, b)"""
self.check(b, a)
def test_2(self):
b = """ifilterfalse(a, b)"""
a = """filterfalse(a, b)"""
self.check(b, a)
b = """izip_longest(a, b)"""
a = """zip_longest(a, b)"""
self.check(b, a)
def test_space_1(self):
b = """ %s(f, a)"""
a = """ %s(f, a)"""
self.checkall(b, a)
def test_space_2(self):
b = """ itertools.ifilterfalse(a, b)"""
a = """ itertools.filterfalse(a, b)"""
self.check(b, a)
b = """ itertools.izip_longest(a, b)"""
a = """ itertools.zip_longest(a, b)"""
self.check(b, a)
def test_run_order(self):
self.assert_runs_after('map', 'zip', 'filter')
class Test_itertools_imports(FixerTestCase):
fixer = 'itertools_imports'
def test_reduced(self):
b = "from itertools import imap, izip, foo"
a = "from itertools import foo"
self.check(b, a)
b = "from itertools import bar, imap, izip, foo"
a = "from itertools import bar, foo"
self.check(b, a)
b = "from itertools import chain, imap, izip"
a = "from itertools import chain"
self.check(b, a)
def test_comments(self):
b = "#foo\nfrom itertools import imap, izip"
a = "#foo\n"
self.check(b, a)
def test_none(self):
b = "from itertools import imap, izip"
a = ""
self.check(b, a)
b = "from itertools import izip"
a = ""
self.check(b, a)
def test_import_as(self):
b = "from itertools import izip, bar as bang, imap"
a = "from itertools import bar as bang"
self.check(b, a)
b = "from itertools import izip as _zip, imap, bar"
a = "from itertools import bar"
self.check(b, a)
b = "from itertools import imap as _map"
a = ""
self.check(b, a)
b = "from itertools import imap as _map, izip as _zip"
a = ""
self.check(b, a)
s = "from itertools import bar as bang"
self.unchanged(s)
def test_ifilter_and_zip_longest(self):
for name in "filterfalse", "zip_longest":
b = "from itertools import i%s" % (name,)
a = "from itertools import %s" % (name,)
self.check(b, a)
b = "from itertools import imap, i%s, foo" % (name,)
a = "from itertools import %s, foo" % (name,)
self.check(b, a)
b = "from itertools import bar, i%s, foo" % (name,)
a = "from itertools import bar, %s, foo" % (name,)
self.check(b, a)
def test_import_star(self):
s = "from itertools import *"
self.unchanged(s)
def test_unchanged(self):
s = "from itertools import foo"
self.unchanged(s)
class Test_import(FixerTestCase):
fixer = "import"
def setUp(self):
super(Test_import, self).setUp()
# Need to replace fix_import's exists method
# so we can check that it's doing the right thing
self.files_checked = []
self.present_files = set()
self.always_exists = True
def fake_exists(name):
self.files_checked.append(name)
return self.always_exists or (name in self.present_files)
from lib2to3.fixes import fix_import
fix_import.exists = fake_exists
def tearDown(self):
from lib2to3.fixes import fix_import
fix_import.exists = os.path.exists
def check_both(self, b, a):
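        # The rewrite should fire when the sibling module exists on disk and
        # should be skipped when it does not.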
self.always_exists = True
super(Test_import, self).check(b, a)
self.always_exists = False
super(Test_import, self).unchanged(b)
def test_files_checked(self):
def p(path):
# Takes a unix path and returns a path with correct separators
            return os.path.sep.join(path.split("/"))
self.always_exists = False
self.present_files = set(['__init__.py'])
expected_extensions = ('.py', os.path.sep, '.pyc', '.so', '.sl', '.pyd')
names_to_test = (p("/spam/eggs.py"), "ni.py", p("../../shrubbery.py"))
for name in names_to_test:
self.files_checked = []
self.filename = name
self.unchanged("import jam")
if os.path.dirname(name):
name = os.path.dirname(name) + '/jam'
else:
name = 'jam'
expected_checks = set(name + ext for ext in expected_extensions)
expected_checks.add("__init__.py")
self.assertEqual(set(self.files_checked), expected_checks)
def test_not_in_package(self):
s = "import bar"
self.always_exists = False
self.present_files = set(["bar.py"])
self.unchanged(s)
def test_with_absolute_import_enabled(self):
s = "from __future__ import absolute_import\nimport bar"
self.always_exists = False
self.present_files = set(["__init__.py", "bar.py"])
self.unchanged(s)
def test_in_package(self):
b = "import bar"
a = "from . import bar"
self.always_exists = False
self.present_files = set(["__init__.py", "bar.py"])
self.check(b, a)
def test_import_from_package(self):
b = "import bar"
a = "from . import bar"
self.always_exists = False
self.present_files = set(["__init__.py", "bar" + os.path.sep])
self.check(b, a)
def test_already_relative_import(self):
s = "from . import bar"
self.unchanged(s)
def test_comments_and_indent(self):
b = "import bar # Foo"
a = "from . import bar # Foo"
self.check(b, a)
def test_from(self):
b = "from foo import bar, baz"
a = "from .foo import bar, baz"
self.check_both(b, a)
b = "from foo import bar"
a = "from .foo import bar"
self.check_both(b, a)
b = "from foo import (bar, baz)"
a = "from .foo import (bar, baz)"
self.check_both(b, a)
def test_dotted_from(self):
b = "from green.eggs import ham"
a = "from .green.eggs import ham"
self.check_both(b, a)
def test_from_as(self):
b = "from green.eggs import ham as spam"
a = "from .green.eggs import ham as spam"
self.check_both(b, a)
def test_import(self):
b = "import foo"
a = "from . import foo"
self.check_both(b, a)
b = "import foo, bar"
a = "from . import foo, bar"
self.check_both(b, a)
b = "import foo, bar, x"
a = "from . import foo, bar, x"
self.check_both(b, a)
b = "import x, y, z"
a = "from . import x, y, z"
self.check_both(b, a)
def test_import_as(self):
b = "import foo as x"
a = "from . import foo as x"
self.check_both(b, a)
b = "import a as b, b as c, c as d"
a = "from . import a as b, b as c, c as d"
self.check_both(b, a)
def test_local_and_absolute(self):
self.always_exists = False
self.present_files = set(["foo.py", "__init__.py"])
s = "import foo, bar"
self.warns_unchanged(s, "absolute and local imports together")
def test_dotted_import(self):
b = "import foo.bar"
a = "from . import foo.bar"
self.check_both(b, a)
def test_dotted_import_as(self):
b = "import foo.bar as bang"
a = "from . import foo.bar as bang"
self.check_both(b, a)
def test_prefix(self):
b = """
# prefix
import foo.bar
"""
a = """
# prefix
from . import foo.bar
"""
self.check_both(b, a)
class Test_set_literal(FixerTestCase):
fixer = "set_literal"
def test_basic(self):
b = """set([1, 2, 3])"""
a = """{1, 2, 3}"""
self.check(b, a)
b = """set((1, 2, 3))"""
a = """{1, 2, 3}"""
self.check(b, a)
b = """set((1,))"""
a = """{1}"""
self.check(b, a)
b = """set([1])"""
self.check(b, a)
b = """set((a, b))"""
a = """{a, b}"""
self.check(b, a)
b = """set([a, b])"""
self.check(b, a)
b = """set((a*234, f(args=23)))"""
a = """{a*234, f(args=23)}"""
self.check(b, a)
b = """set([a*23, f(23)])"""
a = """{a*23, f(23)}"""
self.check(b, a)
b = """set([a-234**23])"""
a = """{a-234**23}"""
self.check(b, a)
def test_listcomps(self):
b = """set([x for x in y])"""
a = """{x for x in y}"""
self.check(b, a)
b = """set([x for x in y if x == m])"""
a = """{x for x in y if x == m}"""
self.check(b, a)
b = """set([x for x in y for a in b])"""
a = """{x for x in y for a in b}"""
self.check(b, a)
b = """set([f(x) - 23 for x in y])"""
a = """{f(x) - 23 for x in y}"""
self.check(b, a)
def test_whitespace(self):
b = """set( [1, 2])"""
a = """{1, 2}"""
self.check(b, a)
b = """set([1 , 2])"""
a = """{1 , 2}"""
self.check(b, a)
b = """set([ 1 ])"""
a = """{ 1 }"""
self.check(b, a)
b = """set( [1] )"""
a = """{1}"""
self.check(b, a)
b = """set([ 1, 2 ])"""
a = """{ 1, 2 }"""
self.check(b, a)
b = """set([x for x in y ])"""
a = """{x for x in y }"""
self.check(b, a)
b = """set(
[1, 2]
)
"""
a = """{1, 2}\n"""
self.check(b, a)
def test_comments(self):
b = """set((1, 2)) # Hi"""
a = """{1, 2} # Hi"""
self.check(b, a)
# This isn't optimal behavior, but the fixer is optional.
b = """
# Foo
set( # Bar
(1, 2)
)
"""
a = """
# Foo
{1, 2}
"""
self.check(b, a)
def test_unchanged(self):
s = """set()"""
self.unchanged(s)
s = """set(a)"""
self.unchanged(s)
s = """set(a, b, c)"""
self.unchanged(s)
# Don't transform generators because they might have to be lazy.
s = """set(x for x in y)"""
self.unchanged(s)
s = """set(x for x in y if z)"""
self.unchanged(s)
s = """set(a*823-23**2 + f(23))"""
self.unchanged(s)
class Test_sys_exc(FixerTestCase):
fixer = "sys_exc"
def test_0(self):
b = "sys.exc_type"
a = "sys.exc_info()[0]"
self.check(b, a)
def test_1(self):
b = "sys.exc_value"
a = "sys.exc_info()[1]"
self.check(b, a)
def test_2(self):
b = "sys.exc_traceback"
a = "sys.exc_info()[2]"
self.check(b, a)
def test_3(self):
b = "sys.exc_type # Foo"
a = "sys.exc_info()[0] # Foo"
self.check(b, a)
def test_4(self):
b = "sys. exc_type"
a = "sys. exc_info()[0]"
self.check(b, a)
def test_5(self):
b = "sys .exc_type"
a = "sys .exc_info()[0]"
self.check(b, a)
class Test_paren(FixerTestCase):
fixer = "paren"
def test_0(self):
b = """[i for i in 1, 2 ]"""
a = """[i for i in (1, 2) ]"""
self.check(b, a)
def test_1(self):
b = """[i for i in 1, 2, ]"""
a = """[i for i in (1, 2,) ]"""
self.check(b, a)
def test_2(self):
b = """[i for i in 1, 2 ]"""
a = """[i for i in (1, 2) ]"""
self.check(b, a)
def test_3(self):
b = """[i for i in 1, 2 if i]"""
a = """[i for i in (1, 2) if i]"""
self.check(b, a)
def test_4(self):
b = """[i for i in 1, 2 ]"""
a = """[i for i in (1, 2) ]"""
self.check(b, a)
def test_5(self):
b = """(i for i in 1, 2)"""
a = """(i for i in (1, 2))"""
self.check(b, a)
def test_6(self):
b = """(i for i in 1 ,2 if i)"""
a = """(i for i in (1 ,2) if i)"""
self.check(b, a)
def test_unchanged_0(self):
s = """[i for i in (1, 2)]"""
self.unchanged(s)
def test_unchanged_1(self):
s = """[i for i in foo()]"""
self.unchanged(s)
def test_unchanged_2(self):
s = """[i for i in (1, 2) if nothing]"""
self.unchanged(s)
def test_unchanged_3(self):
s = """(i for i in (1, 2))"""
self.unchanged(s)
def test_unchanged_4(self):
s = """[i for i in m]"""
self.unchanged(s)
class Test_metaclass(FixerTestCase):
fixer = 'metaclass'
def test_unchanged(self):
self.unchanged("class X(): pass")
self.unchanged("class X(object): pass")
self.unchanged("class X(object1, object2): pass")
self.unchanged("class X(object1, object2, object3): pass")
self.unchanged("class X(metaclass=Meta): pass")
self.unchanged("class X(b, arg=23, metclass=Meta): pass")
self.unchanged("class X(b, arg=23, metaclass=Meta, other=42): pass")
s = """
class X:
def __metaclass__(self): pass
"""
self.unchanged(s)
s = """
class X:
a[23] = 74
"""
self.unchanged(s)
def test_comments(self):
b = """
class X:
# hi
__metaclass__ = AppleMeta
"""
a = """
class X(metaclass=AppleMeta):
# hi
pass
"""
self.check(b, a)
b = """
class X:
__metaclass__ = Meta
# Bedtime!
"""
a = """
class X(metaclass=Meta):
pass
# Bedtime!
"""
self.check(b, a)
def test_meta(self):
# no-parent class, odd body
b = """
class X():
__metaclass__ = Q
pass
"""
a = """
class X(metaclass=Q):
pass
"""
self.check(b, a)
# one parent class, no body
b = """class X(object): __metaclass__ = Q"""
a = """class X(object, metaclass=Q): pass"""
self.check(b, a)
# one parent, simple body
b = """
class X(object):
__metaclass__ = Meta
bar = 7
"""
a = """
class X(object, metaclass=Meta):
bar = 7
"""
self.check(b, a)
b = """
class X:
__metaclass__ = Meta; x = 4; g = 23
"""
a = """
class X(metaclass=Meta):
x = 4; g = 23
"""
self.check(b, a)
# one parent, simple body, __metaclass__ last
b = """
class X(object):
bar = 7
__metaclass__ = Meta
"""
a = """
class X(object, metaclass=Meta):
bar = 7
"""
self.check(b, a)
# redefining __metaclass__
b = """
class X():
__metaclass__ = A
__metaclass__ = B
bar = 7
"""
a = """
class X(metaclass=B):
bar = 7
"""
self.check(b, a)
# multiple inheritance, simple body
b = """
class X(clsA, clsB):
__metaclass__ = Meta
bar = 7
"""
a = """
class X(clsA, clsB, metaclass=Meta):
bar = 7
"""
self.check(b, a)
# keywords in the class statement
b = """class m(a, arg=23): __metaclass__ = Meta"""
a = """class m(a, arg=23, metaclass=Meta): pass"""
self.check(b, a)
b = """
class X(expression(2 + 4)):
__metaclass__ = Meta
"""
a = """
class X(expression(2 + 4), metaclass=Meta):
pass
"""
self.check(b, a)
b = """
class X(expression(2 + 4), x**4):
__metaclass__ = Meta
"""
a = """
class X(expression(2 + 4), x**4, metaclass=Meta):
pass
"""
self.check(b, a)
b = """
class X:
__metaclass__ = Meta
save.py = 23
"""
a = """
class X(metaclass=Meta):
save.py = 23
"""
self.check(b, a)
class Test_getcwdu(FixerTestCase):
fixer = 'getcwdu'
def test_basic(self):
b = """os.getcwdu"""
a = """os.getcwd"""
self.check(b, a)
b = """os.getcwdu()"""
a = """os.getcwd()"""
self.check(b, a)
b = """meth = os.getcwdu"""
a = """meth = os.getcwd"""
self.check(b, a)
b = """os.getcwdu(args)"""
a = """os.getcwd(args)"""
self.check(b, a)
def test_comment(self):
b = """os.getcwdu() # Foo"""
a = """os.getcwd() # Foo"""
self.check(b, a)
def test_unchanged(self):
s = """os.getcwd()"""
self.unchanged(s)
s = """getcwdu()"""
self.unchanged(s)
s = """os.getcwdb()"""
self.unchanged(s)
def test_indentation(self):
b = """
if 1:
os.getcwdu()
"""
a = """
if 1:
os.getcwd()
"""
self.check(b, a)
    def test_mutilation(self):
b = """os .getcwdu()"""
a = """os .getcwd()"""
self.check(b, a)
b = """os. getcwdu"""
a = """os. getcwd"""
self.check(b, a)
b = """os.getcwdu ( )"""
a = """os.getcwd ( )"""
self.check(b, a)
class Test_operator(FixerTestCase):
fixer = "operator"
def test_operator_isCallable(self):
b = "operator.isCallable(x)"
a = "hasattr(x, '__call__')"
self.check(b, a)
def test_operator_sequenceIncludes(self):
b = "operator.sequenceIncludes(x, y)"
a = "operator.contains(x, y)"
self.check(b, a)
b = "operator .sequenceIncludes(x, y)"
a = "operator .contains(x, y)"
self.check(b, a)
b = "operator. sequenceIncludes(x, y)"
a = "operator. contains(x, y)"
self.check(b, a)
def test_operator_isSequenceType(self):
b = "operator.isSequenceType(x)"
a = "import collections\nisinstance(x, collections.Sequence)"
self.check(b, a)
def test_operator_isMappingType(self):
b = "operator.isMappingType(x)"
a = "import collections\nisinstance(x, collections.Mapping)"
self.check(b, a)
def test_operator_isNumberType(self):
b = "operator.isNumberType(x)"
a = "import numbers\nisinstance(x, numbers.Number)"
self.check(b, a)
def test_operator_repeat(self):
b = "operator.repeat(x, n)"
a = "operator.mul(x, n)"
self.check(b, a)
b = "operator .repeat(x, n)"
a = "operator .mul(x, n)"
self.check(b, a)
b = "operator. repeat(x, n)"
a = "operator. mul(x, n)"
self.check(b, a)
def test_operator_irepeat(self):
b = "operator.irepeat(x, n)"
a = "operator.imul(x, n)"
self.check(b, a)
b = "operator .irepeat(x, n)"
a = "operator .imul(x, n)"
self.check(b, a)
b = "operator. irepeat(x, n)"
a = "operator. imul(x, n)"
self.check(b, a)
def test_bare_isCallable(self):
s = "isCallable(x)"
t = "You should use 'hasattr(x, '__call__')' here."
self.warns_unchanged(s, t)
def test_bare_sequenceIncludes(self):
s = "sequenceIncludes(x, y)"
t = "You should use 'operator.contains(x, y)' here."
self.warns_unchanged(s, t)
def test_bare_operator_isSequenceType(self):
s = "isSequenceType(z)"
t = "You should use 'isinstance(z, collections.Sequence)' here."
self.warns_unchanged(s, t)
def test_bare_operator_isMappingType(self):
s = "isMappingType(x)"
t = "You should use 'isinstance(x, collections.Mapping)' here."
self.warns_unchanged(s, t)
def test_bare_operator_isNumberType(self):
s = "isNumberType(y)"
t = "You should use 'isinstance(y, numbers.Number)' here."
self.warns_unchanged(s, t)
def test_bare_operator_repeat(self):
s = "repeat(x, n)"
t = "You should use 'operator.mul(x, n)' here."
self.warns_unchanged(s, t)
def test_bare_operator_irepeat(self):
s = "irepeat(y, 187)"
t = "You should use 'operator.imul(y, 187)' here."
self.warns_unchanged(s, t)
class Test_exitfunc(FixerTestCase):
fixer = "exitfunc"
def test_simple(self):
b = """
import sys
sys.exitfunc = my_atexit
"""
a = """
import sys
import atexit
atexit.register(my_atexit)
"""
self.check(b, a)
def test_names_import(self):
b = """
import sys, crumbs
sys.exitfunc = my_func
"""
a = """
import sys, crumbs, atexit
atexit.register(my_func)
"""
self.check(b, a)
def test_complex_expression(self):
b = """
import sys
sys.exitfunc = do(d)/a()+complex(f=23, g=23)*expression
"""
a = """
import sys
import atexit
atexit.register(do(d)/a()+complex(f=23, g=23)*expression)
"""
self.check(b, a)
def test_comments(self):
b = """
import sys # Foo
sys.exitfunc = f # Blah
"""
a = """
import sys
import atexit # Foo
atexit.register(f) # Blah
"""
self.check(b, a)
b = """
import apples, sys, crumbs, larry # Pleasant comments
sys.exitfunc = func
"""
a = """
import apples, sys, crumbs, larry, atexit # Pleasant comments
atexit.register(func)
"""
self.check(b, a)
def test_in_a_function(self):
b = """
import sys
def f():
sys.exitfunc = func
"""
a = """
import sys
import atexit
def f():
atexit.register(func)
"""
self.check(b, a)
def test_no_sys_import(self):
b = """sys.exitfunc = f"""
a = """atexit.register(f)"""
msg = ("Can't find sys import; Please add an atexit import at the "
"top of your file.")
self.warns(b, a, msg)
def test_unchanged(self):
s = """f(sys.exitfunc)"""
self.unchanged(s)
| gpl-3.0 | -3,671,492,013,689,923,000 | 25.239406 | 90 | 0.425036 | false |
Princu7/open-event-orga-server | app/helpers/data.py | 2 | 42967 | import json
import logging
import os.path
import random
import shutil
import traceback
from datetime import datetime, timedelta
from os import path
from urllib2 import urlopen
from uuid import uuid4
import PIL
import oauth2
from PIL import Image
from flask import flash, url_for, g, current_app
from flask.ext import login
from flask.ext.scrypt import generate_password_hash, generate_random_salt
from flask_socketio import emit
from requests_oauthlib import OAuth2Session
from app.helpers.assets.images import get_image_file_name, get_path_of_temp_url
from app.helpers.cache import cache
from app.helpers.helpers import string_empty, string_not_empty
from app.helpers.notification_email_triggers import trigger_new_session_notifications, \
trigger_session_state_change_notifications
from app.helpers.oauth import OAuth, FbOAuth, InstagramOAuth, TwitterOAuth
from app.helpers.sessions_speakers.speakers import save_speaker
from app.helpers.storage import upload, UPLOAD_PATHS, UploadedFile, upload_local
from app.helpers import helpers as Helper
from app.helpers.data_getter import DataGetter
from app.helpers.system_mails import MAILS
from app.helpers.update_version import VersionUpdater
from app.models import db
from app.models.activity import Activity, ACTIVITIES
from app.models.email_notifications import EmailNotification
from app.models.event import Event, EventsUsers
from app.models.image_sizes import ImageSizes
from app.models.invite import Invite
from app.models.message_settings import MessageSettings
from app.models.notifications import Notification
from app.models.page import Page
from app.models.panel_permissions import PanelPermission
from app.models.permission import Permission
from app.models.role import Role
from app.models.role_invite import RoleInvite
from app.models.service import Service
from app.models.session import Session
from app.models.session_type import SessionType
from app.models.social_link import SocialLink
from app.models.speaker import Speaker
from app.models.system_role import CustomSysRole, UserSystemRole
from app.models.track import Track
from app.models.user import User, ATTENDEE
from app.models.user_detail import UserDetail
from app.models.user_permissions import UserPermission
from app.models.users_events_roles import UsersEventsRoles
class DataManager(object):
"""Main class responsible for DataBase managing"""
@staticmethod
def create_user_notification(user, action, title, message):
"""
Create a User Notification
:param user: User object to send the notification to
:param action: Action being performed
:param title: The message title
:param message: The message
"""
notification = Notification(user=user,
action=action,
title=title,
message=message,
received_at=datetime.now())
saved = save_to_db(notification, 'User notification saved')
if saved:
DataManager.push_user_notification(user)
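    # Hypothetical usage (action and message strings are illustrative only):
    #   DataManager.create_user_notification(user, 'session_accept',
    #                                        'Session accepted', 'Your talk was accepted.')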
@staticmethod
def push_user_notification(user):
"""
Push user notification using websockets.
"""
if not current_app.config.get('INTEGRATE_SOCKETIO', False):
return False
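        # Each user listens on a private room; the '/notifs' namespace feeds the
        # header badge and '/notifpage' feeds the notifications page.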
user_room = 'user_{}'.format(user.id)
emit('notifs-response',
{'meta': 'New notifications',
'notif_count': user.get_unread_notif_count(),
'notifs': user.get_unread_notifs()},
room=user_room,
namespace='/notifs')
emit('notifpage-response',
{'meta': 'New notifpage notifications',
'notif': DataGetter.get_latest_notif(user)},
room=user_room,
namespace='/notifpage')
@staticmethod
def mark_user_notification_as_read(notification):
"""Mark a particular notification read.
"""
notification.has_read = True
save_to_db(notification, 'Mark notification as read')
@staticmethod
def mark_all_user_notification_as_read(user):
"""Mark all notifications for a User as read.
"""
unread_notifs = Notification.query.filter_by(user=user,
has_read=False)
for notif in unread_notifs:
notif.has_read = True
db.session.add(notif)
db.session.commit()
@staticmethod
def add_event_role_invite(email, role_name, event_id):
"""
        Save an event role invite to the database and return accept and decline links.
:param email: Email for the invite
:param role_name: Role name for the invite
:param event_id: Event id
"""
role = Role.query.filter_by(name=role_name).first()
event = Event.query.get(event_id)
role_invite = RoleInvite(email=email.lower(),
event=event,
role=role,
create_time=datetime.now())
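        # A 128-bit random value rendered as 32 hex characters serves as the
        # unguessable token embedded in the accept/decline links.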
hash = random.getrandbits(128)
role_invite.hash = '%032x' % hash
save_to_db(role_invite, "Role Invite saved")
accept_link = url_for('events.user_role_invite',
event_id=event_id,
hash=role_invite.hash)
decline_link = url_for('events.user_role_invite_decline',
event_id=event_id,
hash=role_invite.hash)
return accept_link, decline_link
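    # Hypothetical usage (values are illustrative only):
    #   accept, decline = DataManager.add_event_role_invite('bob@example.com', 'organizer', 42)
    # Both URLs can then be embedded in the invitation email.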
@staticmethod
def add_invite_to_event(user_id, event_id):
"""
        Save an invite to the database linking the given user and event.
        :param user_id: id of the User the invite belongs to
        :param event_id: id of the Event the invite belongs to
"""
new_invite = Invite(user_id=user_id,
event_id=event_id)
hash = random.getrandbits(128)
new_invite.hash = "%032x" % hash
save_to_db(new_invite, "Invite saved")
record_activity('invite_user', event_id=event_id, user_id=user_id)
@staticmethod
def toggle_email_notification_settings(user_id, value):
"""
        Toggle e-mail notification settings for every event the user speaks at or organizes.
"""
events = DataGetter.get_all_events()
user = DataGetter.get_user(user_id)
notification_ids = []
for event in events:
if user.is_speaker_at_event(event.id) or user.is_organizer(event.id):
email_notification = DataGetter.get_email_notification_settings_by_event_id(user_id, event.id)
if email_notification:
email_notification.next_event = value
email_notification.new_paper = value
email_notification.session_schedule = value
email_notification.session_accept_reject = value
email_notification.after_ticket_purchase = value
save_to_db(email_notification, "EmailSettings Toggled")
notification_ids.append(email_notification.id)
else:
new_email_notification_setting = EmailNotification(next_event=value,
new_paper=value,
session_schedule=value,
session_accept_reject=value,
after_ticket_purchase=value,
user_id=user_id,
event_id=event.id)
save_to_db(new_email_notification_setting, "EmailSetting Toggled")
notification_ids.append(new_email_notification_setting.id)
return notification_ids
@staticmethod
def add_session_to_event(request, event_id, state=None, use_current_user=True):
"""
Session will be saved to database with proper Event id
:param use_current_user:
:param state:
:param request: The request
:param event_id: Session belongs to Event by event id
"""
form = request.form
slide_temp_url = form.get('slides_url')
video_temp_url = form.get('video_url')
audio_temp_url = form.get('audio_url')
slide_file = ''
video_file = ''
audio_file = ''
if slide_temp_url:
slide_file = UploadedFile(get_path_of_temp_url(slide_temp_url), slide_temp_url.rsplit('/', 1)[1])
if video_temp_url:
video_file = UploadedFile(get_path_of_temp_url(video_temp_url), video_temp_url.rsplit('/', 1)[1])
if audio_temp_url:
audio_file = UploadedFile(get_path_of_temp_url(audio_temp_url), audio_temp_url.rsplit('/', 1)[1])
if not state:
state = form.get('state', 'draft')
event = DataGetter.get_event(event_id)
new_session = Session(title=form.get('title', ''),
subtitle=form.get('subtitle', ''),
long_abstract=form.get('long_abstract', ''),
start_time=event.start_time,
end_time=event.start_time + timedelta(hours=1),
event_id=event_id,
short_abstract=form.get('short_abstract', ''),
level=form.get('level', ''),
comments=form.get('comments', ''),
language=form.get('language', ''),
state=state)
if form.get('track', None) != "":
new_session.track_id = form.get('track', None)
if form.get('session_type', None) != "":
new_session.session_type_id = form.get('session_type', None)
speaker = Speaker.query.filter_by(email=form.get('email', '')).filter_by(event_id=event_id).first()
speaker = save_speaker(
request,
event_id=event_id,
speaker=speaker,
user=login.current_user if use_current_user else None
)
new_session.speakers.append(speaker)
save_to_db(new_session, "Session saved")
if state == 'pending':
trigger_new_session_notifications(new_session.id, event=event)
if slide_temp_url != "" and slide_file:
slide_url = upload(
slide_file,
UPLOAD_PATHS['sessions']['slides'].format(
event_id=int(event_id), id=int(new_session.id)
))
new_session.slides = slide_url
if audio_temp_url != "" and audio_file:
audio_url = upload(
audio_file,
UPLOAD_PATHS['sessions']['audio'].format(
event_id=int(event_id), id=int(new_session.id)
))
new_session.audio = audio_url
if video_temp_url != "" and video_file:
video_url = upload(
video_file,
UPLOAD_PATHS['sessions']['video'].format(
event_id=int(event_id), id=int(new_session.id)
))
new_session.video = video_url
record_activity('create_session', session=new_session, event_id=event_id)
update_version(event_id, False, 'sessions_ver')
invite_emails = form.getlist("speakers[email]")
for index, email in enumerate(invite_emails):
if not string_empty(email):
new_invite = Invite(event_id=event_id, session_id=new_session.id)
hash = random.getrandbits(128)
new_invite.hash = "%032x" % hash
save_to_db(new_invite, "Invite saved")
link = url_for('event_sessions.invited_view', session_id=new_session.id, event_id=event_id,
_external=True)
Helper.send_email_invitation(email, new_session.title, link)
# If a user is registered by the email, send a notification as well
user = DataGetter.get_user_by_email(email, no_flash=True)
if user:
cfs_link = url_for('event_detail.display_event_cfs', identifier=event.identifier)
Helper.send_notif_invite_papers(user, event.name, cfs_link, link)
@staticmethod
def add_session_media(request, media):
media_file = DataManager.get_files_from_request(request, media)
url = upload(media_file, UPLOAD_PATHS['temp']['event'].format(uuid=uuid4()))
return url
@staticmethod
def get_files_from_request(request, file_type):
if file_type in request.files and request.files[file_type].filename != '':
return request.files[file_type]
return ""
@staticmethod
def add_speaker_to_session(request, event_id, session_id, user=login.current_user):
"""
Session will be saved to database with proper Event id
:param session_id:
:param user:
:param request: view data form
:param event_id: Session belongs to Event by event id
"""
session = DataGetter.get_session(session_id)
speaker = save_speaker(request, event_id, user=user)
session.speakers.append(speaker)
save_to_db(session, "Session updated")
update_version(event_id, False, "speakers_ver")
update_version(event_id, False, "sessions_ver")
@staticmethod
def session_accept_reject(session, event_id, state, send_email=True, message=None, subject=None):
session.state = state
session.submission_date = datetime.now()
session.submission_modifier = login.current_user.email
session.state_email_sent = False
save_to_db(session, 'Session State Updated')
if send_email:
trigger_session_state_change_notifications(session, event_id, message=message, subject=subject)
flash("The session has been %s" % state)
@staticmethod
def edit_session(request, session, speaker=None):
with db.session.no_autoflush:
form = request.form
event_id = session.event_id
slide_temp_url = form.get('slides_url')
video_temp_url = form.get('video_url')
audio_temp_url = form.get('audio_url')
slide_file = ''
video_file = ''
audio_file = ''
if slide_temp_url and slide_temp_url != session.slides:
slide_file = UploadedFile(get_path_of_temp_url(slide_temp_url), slide_temp_url.rsplit('/', 1)[1])
if video_temp_url and video_temp_url != session.video:
video_file = UploadedFile(get_path_of_temp_url(video_temp_url), video_temp_url.rsplit('/', 1)[1])
if audio_temp_url and audio_temp_url != session.audio:
audio_file = UploadedFile(get_path_of_temp_url(audio_temp_url), audio_temp_url.rsplit('/', 1)[1])
form_state = form.get('state', 'draft')
if slide_temp_url != "" and slide_temp_url != session.slides and slide_file:
slide_temp_url = upload(slide_file,
UPLOAD_PATHS['sessions']['slides'].format(
event_id=int(event_id), id=int(session.id)
))
if audio_temp_url != "" and audio_temp_url != session.audio and audio_file:
audio_temp_url = upload(
audio_file,
UPLOAD_PATHS['sessions']['audio'].format(
event_id=int(event_id), id=int(session.id)
))
if video_temp_url != "" and video_temp_url != session.video and video_file:
video_temp_url = upload(
video_file,
UPLOAD_PATHS['sessions']['video'].format(
event_id=int(event_id), id=int(session.id)
))
session.slides = slide_temp_url
session.audio = audio_temp_url
session.video = video_temp_url
            if form_state == 'pending' and session.state not in (
                    'pending', 'accepted', 'rejected', 'confirmed'):
session.state = 'pending'
trigger_new_session_notifications(session.id, event_id=event_id)
session.title = form.get('title', '')
session.subtitle = form.get('subtitle', '')
session.long_abstract = form.get('long_abstract', '')
session.short_abstract = form.get('short_abstract', '')
session.level = form.get('level', '')
session.track_id = form.get('track', None) if form.get('track', None) != "" else None
session.session_type_id = form.get('session_type', None) if form.get('session_type', None) != "" else None
existing_speaker_ids = form.getlist("speakers[]")
current_speaker_ids = []
existing_speaker_ids_by_email = []
save_to_db(session, 'Session Updated')
if speaker:
save_speaker(request, event_id=event_id, speaker=speaker)
for existing_speaker in DataGetter.get_speaker_by_email(form.get("email")).all():
existing_speaker_ids_by_email.append(str(existing_speaker.id))
for current_speaker in session.speakers:
current_speaker_ids.append(str(current_speaker.id))
for current_speaker_id in current_speaker_ids:
                if (current_speaker_id not in existing_speaker_ids
                        and current_speaker_id not in existing_speaker_ids_by_email):
current_speaker = DataGetter.get_speaker(current_speaker_id)
session.speakers.remove(current_speaker)
db.session.commit()
for existing_speaker_id in existing_speaker_ids:
existing_speaker = DataGetter.get_speaker(existing_speaker_id)
if existing_speaker not in session.speakers:
session.speakers.append(existing_speaker)
db.session.commit()
record_activity('update_session', session=session, event_id=event_id)
update_version(event_id, False, "sessions_ver")
@staticmethod
def remove_role(uer_id):
"""
Role will be removed from database
:param uer_id: Role id to remove object
"""
uer = UsersEventsRoles.query.get(uer_id)
record_activity('delete_role', role=uer.role, user=uer.user, event_id=uer.event_id)
delete_from_db(uer, "UER deleted")
flash("You've successfully deleted role.")
@staticmethod
def create_user(userdata, is_verified=False):
user = User(email=userdata[0],
password=userdata[1],
is_verified=is_verified)
        # hash the user's password so it is never stored as plaintext in the db
salt = generate_random_salt()
user.password = generate_password_hash(user.password, salt)
hash = random.getrandbits(128)
user.reset_password = str(hash)
user.salt = salt
save_to_db(user, "User created")
record_activity('create_user', user=user)
return user
@staticmethod
def create_super_admin(email, password):
user = User()
user.login = 'super_admin'
user.email = email
salt = generate_random_salt()
password = password
user.password = generate_password_hash(password, salt)
hash = random.getrandbits(128)
user.reset_password = str(hash)
user.salt = salt
user.is_super_admin = True
user.is_admin = True
user.is_verified = True
save_to_db(user, "User created")
return user
@staticmethod
def reset_password(form, reset_hash):
user = User.query.filter_by(reset_password=reset_hash).first()
salt = generate_random_salt()
password = form['new_password_again']
user.password = generate_password_hash(password, salt)
new_hash = random.getrandbits(128)
        user.reset_password = str(new_hash)
user.salt = salt
save_to_db(user, "password resetted")
@staticmethod
def update_user(form, user_id, contacts_only_update=False):
user = User.query.filter_by(id=user_id).first()
user_detail = UserDetail.query.filter_by(user_id=user_id).first()
        if user.email != form['email']:
            record_activity('update_user_email',
                            user_id=user.id, old=user.email, new=form['email'])
            user.is_verified = False
serializer = Helper.get_serializer()
data = [form['email']]
form_hash = serializer.dumps(data)
link = url_for('admin.create_account_after_confirmation_view', hash=form_hash, _external=True)
Helper.send_email_when_changes_email(user.email, form['email'])
Helper.send_notif_when_changes_email(user, user.email, form['email'])
Helper.send_email_confirmation(form, link)
user.email = form['email']
user_detail.contact = form['contact']
if not contacts_only_update:
user_detail.firstname = form['firstname']
user_detail.lastname = form['lastname']
if form.get('facebook', '').strip() != '':
user_detail.facebook = 'https://facebook.com/' + form['facebook'].strip()
else:
user_detail.facebook = ''
if form.get('twitter', '').strip() != '':
user_detail.twitter = 'https://twitter.com/' + form['twitter'].strip()
else:
user_detail.twitter = ''
if form.get('instagram', '').strip() != '':
user_detail.instagram = 'https://instagram.com/' + form['instagram'].strip()
else:
user_detail.instagram = ''
if form.get('google', '').strip() != '':
user_detail.google = 'https://plus.google.com/' + form['google'].strip()
else:
user_detail.google = ''
user_detail.details = form['details']
avatar_img = form.get('avatar-img', None)
if string_not_empty(avatar_img) and avatar_img:
user_detail.avatar_uploaded = ""
user_detail.thumbnail = ""
user_detail.small = ""
user_detail.icon = ""
filename = '{}.png'.format(get_image_file_name())
filepath = '{}/static/{}'.format(path.realpath('.'),
avatar_img[len('/serve_static/'):])
# print "File path 1", filepath
avatar_img_file = UploadedFile(filepath, filename)
avatar_img_temp = upload(avatar_img_file, 'users/%d/avatar' % int(user_id))
user_detail.avatar_uploaded = avatar_img_temp
image_sizes = DataGetter.get_image_sizes_by_type(type='profile')
if not image_sizes:
image_sizes = ImageSizes(full_width=150,
full_height=150,
icon_width=35,
icon_height=35,
thumbnail_width=50,
thumbnail_height=50,
type='profile')
save_to_db(image_sizes, "Image Sizes Saved")
filename = '{}.jpg'.format(get_image_file_name())
filepath = '{}/static/{}'.format(path.realpath('.'),
avatar_img[len('/serve_static/'):])
# print "File path 1", filepath
avatar_img_file = UploadedFile(filepath, filename)
temp_img_file = upload_local(avatar_img_file,
'users/{user_id}/temp'.format(user_id=int(user_id)))
temp_img_file = temp_img_file.replace('/serve_', '')
basewidth = image_sizes.full_width
img = Image.open(temp_img_file)
hsize = image_sizes.full_height
img = img.resize((basewidth, hsize), PIL.Image.ANTIALIAS)
img.save(temp_img_file)
file_name = temp_img_file.rsplit('/', 1)[1]
large_file = UploadedFile(file_path=temp_img_file, filename=file_name)
profile_thumbnail_url = upload(
large_file,
UPLOAD_PATHS['user']['thumbnail'].format(
user_id=int(user_id)
))
basewidth = image_sizes.thumbnail_width
img = Image.open(temp_img_file)
hsize = image_sizes.thumbnail_height
img = img.resize((basewidth, hsize), PIL.Image.ANTIALIAS)
img.save(temp_img_file)
file_name = temp_img_file.rsplit('/', 1)[1]
thumbnail_file = UploadedFile(file_path=temp_img_file, filename=file_name)
profile_small_url = upload(
thumbnail_file,
UPLOAD_PATHS['user']['small'].format(
user_id=int(user_id)
))
basewidth = image_sizes.icon_width
img = Image.open(temp_img_file)
hsize = image_sizes.icon_height
img = img.resize((basewidth, hsize), PIL.Image.ANTIALIAS)
img.save(temp_img_file)
file_name = temp_img_file.rsplit('/', 1)[1]
icon_file = UploadedFile(file_path=temp_img_file, filename=file_name)
profile_icon_url = upload(
icon_file,
UPLOAD_PATHS['user']['icon'].format(
user_id=int(user_id)
))
shutil.rmtree(path='static/media/' + 'users/{user_id}/temp'.format(user_id=int(user_id)))
user_detail.thumbnail = profile_thumbnail_url
user_detail.small = profile_small_url
user_detail.icon = profile_icon_url
        save_to_db(user_detail, "User details updated")
        save_to_db(user, "User updated")
record_activity('update_user', user=user)
@staticmethod
def update_user_permissions(form):
for perm in UserPermission.query.all():
ver_user = '{}-verified_user'.format(perm.name)
unver_user = '{}-unverified_user'.format(perm.name)
# anon_user = '{}-anonymous_user'.format(perm.name)
perm.verified_user = True if form.get(ver_user) == 'on' else False
perm.unverified_user = True if form.get(unver_user) == 'on' else False
# perm.anonymous_user = True if form.get(anon_user) == 'on' else False
db.session.add(perm)
db.session.commit()
@staticmethod
def create_custom_sys_role(form):
role_name = form.get('role_name')
sys_role = CustomSysRole(name=role_name)
save_to_db(sys_role)
from app.views.super_admin import PANEL_LIST
for panel in PANEL_LIST:
if form.get(panel):
perm = PanelPermission(panel, sys_role, True)
else:
perm = PanelPermission(panel, sys_role, False)
save_to_db(perm)
@staticmethod
def update_custom_sys_role(form):
role_name = form.get('role_name')
sys_role = CustomSysRole.query.filter_by(name=role_name).first()
sys_role.name = form.get('new_role_name')
db.session.add(sys_role)
from app.views.super_admin import PANEL_LIST
for panel in PANEL_LIST:
perm, _ = get_or_create(PanelPermission, panel_name=panel,
role=sys_role)
if form.get(panel):
perm.can_access = True
else:
perm.can_access = False
db.session.add(perm)
db.session.commit()
@staticmethod
def delete_custom_sys_role(role_id):
sys_role = CustomSysRole.query.get(role_id)
if sys_role:
delete_from_db(sys_role, 'System Role deleted')
@staticmethod
def get_or_create_user_sys_role(user, role):
role, _ = get_or_create(UserSystemRole, user=user, role=role)
save_to_db(role, 'Custom System Role saved')
@staticmethod
def delete_user_sys_role(user, role):
role = UserSystemRole.query.filter_by(user=user, role=role).first()
if role:
delete_from_db(role, 'Custom System Role deleted')
@staticmethod
def update_permissions(form):
oper = {
'c': 'can_create',
'r': 'can_read',
'u': 'can_update',
'd': 'can_delete',
}
for role in Role.query.all():
for service in Service.query.all():
field = role.name + '-' + service.name
perm = Permission.query.filter_by(role=role, service=service).first()
if not perm:
perm = Permission(role=role, service=service)
                for v, attr in oper.items():
                    if v in form.getlist(field):
                        setattr(perm, attr, True)
                    else:
                        setattr(perm, attr, False)
save_to_db(perm, 'Permission saved')
@staticmethod
def delete_event(e_id):
EventsUsers.query.filter_by(event_id=e_id).delete()
UsersEventsRoles.query.filter_by(event_id=e_id).delete()
EmailNotification.query.filter_by(event_id=e_id).delete()
SessionType.query.filter_by(event_id=e_id).delete()
SocialLink.query.filter_by(event_id=e_id).delete()
        Track.query.filter_by(event_id=e_id).delete()
Invite.query.filter_by(event_id=e_id).delete()
Session.query.filter_by(event_id=e_id).delete()
Event.query.filter_by(id=e_id).delete()
# record_activity('delete_event', event_id=e_id)
db.session.commit()
@staticmethod
def trash_event(e_id):
event = Event.query.get(e_id)
event.in_trash = True
event.trash_date = datetime.now()
save_to_db(event, "Event Added to Trash")
return event
@staticmethod
def add_role_to_event(form, event_id, record=True):
user = User.query.filter_by(email=form['user_email']).first()
role = Role.query.filter_by(name=form['user_role']).first()
uer = UsersEventsRoles(event=Event.query.get(event_id),
user=user, role=role)
save_to_db(uer, "UserEventRole saved")
if record:
record_activity('create_role', role=role, user=user, event_id=event_id)
@staticmethod
def add_attendee_role_to_event(user, event_id):
role = Role.query.filter_by(name=ATTENDEE).first()
uer = UsersEventsRoles(event=Event.query.get(event_id), user=user, role=role)
save_to_db(uer, "Attendee saved")
@staticmethod
def decline_role_invite(role_invite):
role_invite.declined = True
save_to_db(role_invite)
@staticmethod
def update_user_event_role(form, uer):
role = Role.query.filter_by(name=form['user_role']).first()
user = User.query.filter_by(email=form['user_email']).first()
uer.user = user
uer.role_id = role.id
save_to_db(uer, "Event saved")
record_activity('update_role', role=role, user=user, event_id=uer.event_id)
@staticmethod
def create_page(form):
page = Page(name=form.get('name', ''), title=form.get('title', ''), description=form.get('description', ''),
url=form.get('url', ''), place=form.get('place', ''), index=form.get('index', 0),
language=form.get('language', 'en'))
save_to_db(page, "Page created")
cache.delete('pages')
@staticmethod
def update_page(page, form):
page.name = form.get('name', '')
page.title = form.get('title', '')
page.description = form.get('description', '')
page.url = form.get('url', '')
page.place = form.get('place', '')
        page.index = form.get('index', 0)
page.language = form.get('language', 'en')
save_to_db(page, "Page updated")
cache.delete('pages')
@staticmethod
def create_or_update_message_settings(form):
for mail in MAILS:
mail_status = 1 if form.get(mail + '_mail_status', 'off') == 'on' else 0
notif_status = 1 if form.get(mail + '_notif_status', 'off') == 'on' else 0
user_control_status = 1 if form.get(mail + '_user_control_status', 'off') == 'on' else 0
message_setting = MessageSettings.query.filter_by(action=mail).first()
if message_setting:
message_setting.mail_status = mail_status
message_setting.notif_status = notif_status
message_setting.user_control_status = user_control_status
save_to_db(message_setting, "Message Settings Updated")
else:
message_setting = MessageSettings(action=mail,
mail_status=mail_status,
notif_status=notif_status,
user_control_status=user_control_status)
save_to_db(message_setting, "Message Settings Updated")
def save_to_db(item, msg="Saved to db", print_error=True):
"""Convenience function to wrap a proper DB save
:param print_error:
:param item: will be saved to database
:param msg: Message to log
"""
try:
logging.info(msg)
db.session.add(item)
logging.info('added to session')
db.session.commit()
return True
    except Exception as e:
        if print_error:
            print(e)
            traceback.print_exc()
        logging.error('DB Exception! %s' % e)
        db.session.rollback()
        return False
def delete_from_db(item, msg='Deleted from db'):
"""Convenience function to wrap a proper DB delete
:param item: will be removed from database
:param msg: Message to log
"""
try:
logging.info(msg)
db.session.delete(item)
logging.info('removed from session')
db.session.commit()
return True
    except Exception as error:
logging.error('DB Exception! %s' % error)
db.session.rollback()
return False
def get_google_auth(state=None, token=None):
if token:
return OAuth2Session(OAuth.get_client_id(), token=token)
if state:
return OAuth2Session(OAuth.get_client_id(), state=state, scope=OAuth.SCOPE,
redirect_uri=OAuth.get_redirect_uri())
oauth = OAuth2Session(OAuth.get_client_id(), scope=OAuth.SCOPE, redirect_uri=OAuth.get_redirect_uri())
return oauth
def get_facebook_auth(state=None, token=None):
if token:
return OAuth2Session(FbOAuth.get_client_id(), token=token)
if state:
return OAuth2Session(FbOAuth.get_client_id(), state=state, scope=FbOAuth.SCOPE,
redirect_uri=FbOAuth.get_redirect_uri())
oauth = OAuth2Session(FbOAuth.get_client_id(), scope=FbOAuth.SCOPE, redirect_uri=FbOAuth.get_redirect_uri())
return oauth
def get_instagram_auth(state=None, token=None):
if token:
return OAuth2Session(InstagramOAuth.get_client_id(), token=token)
if state:
return OAuth2Session(InstagramOAuth.get_client_id(), state=state,
redirect_uri=InstagramOAuth.get_redirect_uri())
# scope = "+".join(InstagramOAuth.SCOPE)
oauth = OAuth2Session(InstagramOAuth.get_client_id(), redirect_uri=InstagramOAuth.get_redirect_uri())
return oauth
def get_twitter_auth_url():
consumer = oauth2.Consumer(key=TwitterOAuth.get_client_id(),
secret=TwitterOAuth.get_client_secret())
client = oauth2.Client(consumer)
resp, content = client.request('https://api.twitter.com/oauth/request_token', "GET")
return content + "&redirect_uri" + TwitterOAuth.get_redirect_uri(), consumer
def create_user_oauth(user, user_data, token, method):
if user is None:
user = User()
user.email = user_data['email']
if method == 'Google':
user.avatar = user_data['picture']
if method == 'Facebook':
user.avatar = user_data['picture']['data']['url']
user.tokens = json.dumps(token)
user.is_verified = True
save_to_db(user, "User created")
user_detail = UserDetail.query.filter_by(user_id=user.id).first()
if 'http' in user.avatar:
f_name, uploaded_file = uploaded_file_provided_by_url(user.avatar)
avatar = upload(uploaded_file, 'users/%d/avatar' % int(user.id))
user_detail.avatar_uploaded = avatar
user_detail.firstname = user_data['name']
save_to_db(user, "User Details Updated")
return user
def create_user_password(form, user):
salt = generate_random_salt()
password = form['new_password_again']
user.password = generate_password_hash(password, salt)
hash = random.getrandbits(128)
user.reset_password = str(hash)
user.salt = salt
user.is_verified = True
save_to_db(user, "User password created")
return user
def user_logged_in(user):
speakers = DataGetter.get_speaker_by_email(user.email).all()
for speaker in speakers:
if not speaker.user:
speaker.user = user
role = Role.query.filter_by(name='speaker').first()
event = DataGetter.get_event(speaker.event_id)
uer = UsersEventsRoles(user=user, event=event, role=role)
save_to_db(uer)
save_to_db(speaker)
return True
def record_activity(template, login_user=None, **kwargs):
"""
    Record an activity entry, resolving kwargs into a readable message.
"""
if not login_user and hasattr(g, 'user'):
login_user = g.user
if not login_user and login.current_user.is_authenticated:
login_user = login.current_user
if login_user:
actor = login_user.email + ' (' + str(login_user.id) + ')'
else:
actor = 'Anonymous'
id_str = ' (%d)'
sequence = '"%s"'
# add more information for objects
for k in kwargs:
v = kwargs[k]
if k.find('_id') > -1:
kwargs[k] = str(v)
elif k.startswith('user'):
kwargs[k] = sequence % v.email + id_str % v.id
elif k.startswith('role'):
kwargs[k] = sequence % v.title_name
elif k.startswith('session'):
kwargs[k] = sequence % v.title + id_str % v.id
elif k.startswith('track'):
kwargs[k] = sequence % v.name + id_str % v.id
elif k.startswith('speaker'):
kwargs[k] = sequence % v.name + id_str % v.id
else:
kwargs[k] = str(v)
try:
msg = ACTIVITIES[template].format(**kwargs)
    except Exception:  # template missing or kwargs malformed; log a marker instead of raising
msg = '[ERROR LOGGING] %s' % template
activity = Activity(actor=actor, action=msg)
save_to_db(activity, 'Activity Recorded')
def update_version(event_id, is_created, column_to_increment):
"""Function responsible for increasing version when some data will be
created or changed
:param event_id: Event id
:param is_created: Object exist True/False
:param column_to_increment: which column should be increment
"""
VersionUpdater(event_id=event_id,
is_created=is_created,
column_to_increment=column_to_increment).update()
def get_or_create(model, **kwargs):
was_created = False
instance = db.session.query(model).filter_by(**kwargs).first()
if instance:
return instance, was_created
else:
instance = model(**kwargs)
db.session.add(instance)
db.session.commit()
was_created = True
return instance, was_created
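# A minimal usage sketch (hypothetical lookup; Role is the model imported
# above): fetch an existing row or insert it in one call.
#
#   role, was_created = get_or_create(Role, name='speaker')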
def update_or_create(model, event_id, **kwargs):
"""
    Update or create an item, keyed on its event id.
"""
was_created = False
instance = db.session.query(model).filter_by(event_id=event_id).first()
if instance:
db.session.query(model).filter_by(event_id=event_id).update(kwargs)
else:
was_created = True
instance = model(event_id=event_id, **kwargs)
db.session.add(instance)
db.session.commit()
return instance, was_created
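# A minimal usage sketch (hypothetical field values; EmailNotification is
# keyed here on its event id):
#
#   setting, created = update_or_create(EmailNotification, 1, next_event=True)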
def update_role_to_admin(form, user_id):
user = DataGetter.get_user(user_id)
old_admin_status = user.is_admin
user.is_admin = True if form['admin_perm'] == 'isAdmin' else False
save_to_db(user, "User role Updated")
if old_admin_status != user.is_admin:
record_activity(
'system_admin', user=user,
status='Assigned' if user.is_admin else 'Unassigned'
)
def trash_user(user_id):
user = DataGetter.get_user(user_id)
user.in_trash = True
user.trash_date = datetime.now()
save_to_db(user, 'User has been added to trash')
return user
def trash_session(session_id):
session = DataGetter.get_session(session_id)
session.in_trash = True
session.trash_date = datetime.now()
save_to_db(session, "Session added to Trash")
update_version(session.event_id, False, 'sessions_ver')
return session
def restore_event(event_id):
event = DataGetter.get_event(event_id)
event.in_trash = False
save_to_db(event, "Event restored from Trash")
def restore_user(user_id):
user = DataGetter.get_user(user_id)
user.in_trash = False
save_to_db(user, "User restored from Trash")
def restore_session(session_id):
session = DataGetter.get_session(session_id)
session.in_trash = False
save_to_db(session, "Session restored from Trash")
update_version(session.event_id, False, 'sessions_ver')
def uploaded_file_provided_by_url(url):
    response_file = urlopen(url)
    filename = get_image_file_name() + '.jpg'
    file_path = os.path.realpath('.') + '/static/uploads/' + filename
    with open(file_path, "wb") as fh:
        fh.write(response_file.read())
    return filename, UploadedFile(file_path, filename)
| gpl-3.0 | 4,234,962,103,604,400,600 | 39.804368 | 118 | 0.575535 | false |
johnnadratowski/examples | python/three_diamonds/three_diamonds.py | 2 | 1968 | import os
import requests
import googlemaps
from geopy.geocoders import Nominatim
from pyicloud import PyiCloudService
def get_icloud_device(cloud, device_id):
    """Look up an iCloud device by id, falling back to a display-name match."""
if device_id in cloud.devices:
return cloud.devices[device_id]
else:
return next(device for device in cloud.devices
if device_id.lower() in device.content['deviceDisplayName'].lower())
def get_icloud_coords(user, password, device_id):
    """Return the (latitude, longitude) of an iCloud device."""
cloud = PyiCloudService(user, password)
device = get_icloud_device(cloud, device_id)
coords = device.location()
return coords["latitude"], coords["longitude"]
def get_address(lat, lng):
    """Reverse-geocode a coordinate pair to a street address."""
geo = Nominatim()
location = geo.reverse((lat, lng))
return location
def get_my_location_by_my_current_ip():
    """Geolocate this machine from its public IP address."""
resp = requests.get('http://freegeoip.net/json')
data = resp.json()
return data['latitude'], data['longitude']
def get_directions(key, origin_lat, origin_lng, dest_lat, dest_lng):
    """Fetch Google Maps directions between two coordinate pairs."""
gmaps = googlemaps.Client(key=key)
directions = gmaps.directions((origin_lat, origin_lng), (dest_lat, dest_lng))
return directions
if __name__ == '__main__':
icloud_user = os.environ["ICLOUD_USER"]
icloud_password = os.environ["ICLOUD_PASS"]
icloud_deviceid = os.environ["ICLOUD_DEVICE"]
gmaps_api_key = os.environ["GMAPS_API_KEY"]
icloud_coords = get_icloud_coords(icloud_user, icloud_password, icloud_deviceid)
icloud_addr = get_address(*icloud_coords)
local_coords = get_my_location_by_my_current_ip()
local_addr = get_address(*local_coords)
directions = get_directions(gmaps_api_key, icloud_coords[0], icloud_coords[1], local_coords[0], local_coords[1])
total_directions = directions[0]["legs"][0]
print("Remote Coords: ", icloud_coords)
print("Remote Addr: ", icloud_addr)
print("Local Coords: ", local_coords)
print("Local Addr: ", local_addr)
print("Distance: ", total_directions["distance"], total_directions["duration"])
| mit | 7,761,675,304,604,290,000 | 34.142857 | 116 | 0.683943 | false |
charlesccychen/beam | sdks/python/apache_beam/runners/direct/helper_transforms.py | 5 | 3879 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import collections
import itertools
import apache_beam as beam
from apache_beam import typehints
from apache_beam.internal.util import ArgumentPlaceholder
from apache_beam.transforms.combiners import _CurriedFn
from apache_beam.utils.windowed_value import WindowedValue
class LiftedCombinePerKey(beam.PTransform):
"""An implementation of CombinePerKey that does mapper-side pre-combining.
"""
def __init__(self, combine_fn, args, kwargs):
args_to_check = itertools.chain(args, kwargs.values())
if isinstance(combine_fn, _CurriedFn):
args_to_check = itertools.chain(args_to_check,
combine_fn.args,
combine_fn.kwargs.values())
if any(isinstance(arg, ArgumentPlaceholder)
for arg in args_to_check):
# This isn't implemented in dataflow either...
raise NotImplementedError('Deferred CombineFn side inputs.')
self._combine_fn = beam.transforms.combiners.curry_combine_fn(
combine_fn, args, kwargs)
def expand(self, pcoll):
return (
pcoll
| beam.ParDo(PartialGroupByKeyCombiningValues(self._combine_fn))
| beam.GroupByKey()
| beam.ParDo(FinishCombine(self._combine_fn)))
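# A minimal usage sketch (hypothetical pipeline; MeanCombineFn is beam's
# built-in mean CombineFn), showing where the lifted combine would slot in:
#
#   from apache_beam.transforms.combiners import MeanCombineFn
#   with beam.Pipeline() as p:
#       averages = (p
#                   | beam.Create([('a', 1), ('a', 3), ('b', 5)])
#                   | LiftedCombinePerKey(MeanCombineFn(), (), {}))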
class PartialGroupByKeyCombiningValues(beam.DoFn):
"""Aggregates values into a per-key-window cache.
As bundles are in-memory-sized, we don't bother flushing until the very end.
"""
def __init__(self, combine_fn):
self._combine_fn = combine_fn
def start_bundle(self):
self._cache = collections.defaultdict(self._combine_fn.create_accumulator)
def process(self, element, window=beam.DoFn.WindowParam):
k, vi = element
self._cache[k, window] = self._combine_fn.add_input(self._cache[k, window],
vi)
def finish_bundle(self):
for (k, w), va in self._cache.items():
yield WindowedValue((k, va), w.end, (w,))
def default_type_hints(self):
hints = self._combine_fn.get_type_hints().copy()
K = typehints.TypeVariable('K')
if hints.input_types:
args, kwargs = hints.input_types
args = (typehints.Tuple[K, args[0]],) + args[1:]
hints.set_input_types(*args, **kwargs)
else:
hints.set_input_types(typehints.Tuple[K, typehints.Any])
hints.set_output_types(typehints.Tuple[K, typehints.Any])
return hints
class FinishCombine(beam.DoFn):
"""Merges partially combined results.
"""
def __init__(self, combine_fn):
self._combine_fn = combine_fn
def process(self, element):
k, vs = element
return [(
k,
self._combine_fn.extract_output(
self._combine_fn.merge_accumulators(vs)))]
def default_type_hints(self):
hints = self._combine_fn.get_type_hints().copy()
K = typehints.TypeVariable('K')
hints.set_input_types(typehints.Tuple[K, typehints.Any])
if hints.output_types:
main_output_type = hints.simple_output_type('')
hints.set_output_types(typehints.Tuple[K, main_output_type])
return hints
| apache-2.0 | -5,541,005,732,885,359,000 | 35.252336 | 79 | 0.681103 | false |
Cinntax/home-assistant | tests/components/daikin/test_config_flow.py | 3 | 3176 | # pylint: disable=W0621
"""Tests for the Daikin config flow."""
import asyncio
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.daikin import config_flow
from homeassistant.components.daikin.const import KEY_IP, KEY_MAC
from homeassistant.const import CONF_HOST
from tests.common import MockConfigEntry, MockDependency
MAC = "AABBCCDDEEFF"
HOST = "127.0.0.1"
def init_config_flow(hass):
"""Init a configuration flow."""
flow = config_flow.FlowHandler()
flow.hass = hass
return flow
@pytest.fixture
def mock_daikin():
"""Mock pydaikin."""
async def mock_daikin_init():
"""Mock the init function in pydaikin."""
pass
with MockDependency("pydaikin.appliance") as mock_daikin_:
mock_daikin_.Appliance().values.get.return_value = "AABBCCDDEEFF"
mock_daikin_.Appliance().init = mock_daikin_init
yield mock_daikin_
async def test_user(hass, mock_daikin):
"""Test user config."""
flow = init_config_flow(hass)
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await flow.async_step_user({CONF_HOST: HOST})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][KEY_MAC] == MAC
async def test_abort_if_already_setup(hass, mock_daikin):
"""Test we abort if Daikin is already setup."""
flow = init_config_flow(hass)
MockConfigEntry(domain="daikin", data={KEY_MAC: MAC}).add_to_hass(hass)
result = await flow.async_step_user({CONF_HOST: HOST})
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_import(hass, mock_daikin):
"""Test import step."""
flow = init_config_flow(hass)
result = await flow.async_step_import({})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await flow.async_step_import({CONF_HOST: HOST})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][KEY_MAC] == MAC
async def test_discovery(hass, mock_daikin):
"""Test discovery step."""
flow = init_config_flow(hass)
result = await flow.async_step_discovery({KEY_IP: HOST, KEY_MAC: MAC})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == HOST
assert result["data"][CONF_HOST] == HOST
assert result["data"][KEY_MAC] == MAC
@pytest.mark.parametrize(
"s_effect,reason",
[(asyncio.TimeoutError, "device_timeout"), (Exception, "device_fail")],
)
async def test_device_abort(hass, mock_daikin, s_effect, reason):
"""Test device abort."""
flow = init_config_flow(hass)
mock_daikin.Appliance.side_effect = s_effect
result = await flow.async_step_user({CONF_HOST: HOST})
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == reason
| apache-2.0 | 2,297,188,636,298,029,600 | 30.445545 | 75 | 0.674748 | false |
rmcgibbo/scipy | scipy/spatial/kdtree.py | 23 | 37353 | # Copyright Anne M. Archibald 2008
# Released under the scipy license
from __future__ import division, print_function, absolute_import
import sys
import numpy as np
from heapq import heappush, heappop
import scipy.sparse
__all__ = ['minkowski_distance_p', 'minkowski_distance',
'distance_matrix',
'Rectangle', 'KDTree']
def minkowski_distance_p(x, y, p=2):
"""
Compute the p-th power of the L**p distance between two arrays.
For efficiency, this function computes the L**p distance but does
not extract the pth root. If `p` is 1 or infinity, this is equal to
the actual L**p distance.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> minkowski_distance_p([[0,0],[0,0]], [[1,1],[0,1]])
array([2, 1])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf:
return np.amax(np.abs(y-x), axis=-1)
elif p == 1:
return np.sum(np.abs(y-x), axis=-1)
else:
return np.sum(np.abs(y-x)**p, axis=-1)
def minkowski_distance(x, y, p=2):
"""
Compute the L**p distance between two arrays.
Parameters
----------
x : (M, K) array_like
Input array.
y : (N, K) array_like
Input array.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
Examples
--------
>>> minkowski_distance([[0,0],[0,0]], [[1,1],[0,1]])
array([ 1.41421356, 1. ])
"""
x = np.asarray(x)
y = np.asarray(y)
if p == np.inf or p == 1:
return minkowski_distance_p(x, y, p)
else:
return minkowski_distance_p(x, y, p)**(1./p)
class Rectangle(object):
"""Hyperrectangle class.
Represents a Cartesian product of intervals.
"""
def __init__(self, maxes, mins):
"""Construct a hyperrectangle."""
self.maxes = np.maximum(maxes,mins).astype(np.float)
self.mins = np.minimum(maxes,mins).astype(np.float)
self.m, = self.maxes.shape
def __repr__(self):
return "<Rectangle %s>" % list(zip(self.mins, self.maxes))
def volume(self):
"""Total volume."""
return np.prod(self.maxes-self.mins)
def split(self, d, split):
"""
Produce two hyperrectangles by splitting.
In general, if you need to compute maximum and minimum
distances to the children, it can be done more efficiently
by updating the maximum and minimum distances to the parent.
Parameters
----------
d : int
Axis to split hyperrectangle along.
split : float
Position along axis `d` to split at.
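        Examples
        --------
        Split the unit square along axis 0 at 0.5:
        >>> less, greater = Rectangle([1., 1.], [0., 0.]).split(0, 0.5)
        >>> less
        <Rectangle [(0.0, 0.5), (0.0, 1.0)]>
        >>> greater
        <Rectangle [(0.5, 1.0), (0.0, 1.0)]>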
"""
mid = np.copy(self.maxes)
mid[d] = split
less = Rectangle(self.mins, mid)
mid = np.copy(self.mins)
mid[d] = split
greater = Rectangle(mid, self.maxes)
return less, greater
def min_distance_point(self, x, p=2.):
"""
Return the minimum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-x,x-self.maxes)),p)
def max_distance_point(self, x, p=2.):
"""
Return the maximum distance between input and points in the hyperrectangle.
Parameters
----------
x : array_like
Input array.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-x,x-self.mins),p)
def min_distance_rectangle(self, other, p=2.):
"""
Compute the minimum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float
Input.
"""
return minkowski_distance(0, np.maximum(0,np.maximum(self.mins-other.maxes,other.mins-self.maxes)),p)
def max_distance_rectangle(self, other, p=2.):
"""
Compute the maximum distance between points in the two hyperrectangles.
Parameters
----------
other : hyperrectangle
Input.
p : float, optional
Input.
"""
return minkowski_distance(0, np.maximum(self.maxes-other.mins,other.maxes-self.mins),p)
class KDTree(object):
"""
kd-tree for quick nearest-neighbor lookup
This class provides an index into a set of k-dimensional points which
can be used to rapidly look up the nearest neighbors of any point.
Parameters
----------
data : (N,K) array_like
The data points to be indexed. This array is not copied, and
so modifying this data will result in bogus results.
leafsize : int, optional
The number of points at which the algorithm switches over to
brute-force. Has to be positive.
Raises
------
RuntimeError
The maximum recursion limit can be exceeded for large data
sets. If this happens, either increase the value for the `leafsize`
parameter or increase the recursion limit by::
>>> import sys
>>> sys.setrecursionlimit(10000)
Notes
-----
The algorithm used is described in Maneewongvatana and Mount 1999.
The general idea is that the kd-tree is a binary tree, each of whose
nodes represents an axis-aligned hyperrectangle. Each node specifies
an axis and splits the set of points based on whether their coordinate
along that axis is greater than or less than a particular value.
During construction, the axis and splitting point are chosen by the
"sliding midpoint" rule, which ensures that the cells do not all
become long and thin.
The tree can be queried for the r closest neighbors of any given point
(optionally returning only those within some maximum distance of the
point). It can also be queried, with a substantial gain in efficiency,
for the r approximate closest neighbors.
For large dimensions (20 is already large) do not expect this to run
significantly faster than brute force. High-dimensional nearest-neighbor
queries are a substantial open problem in computer science.
The tree also supports all-neighbors queries, both with arrays of points
and with other kd-trees. These do use a reasonably efficient algorithm,
but the kd-tree is not necessarily the best data structure for this
sort of calculation.
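    Examples
    --------
    A small tree over three collinear points; the nearest neighbor of
    ``[3, 0]`` is the last data point, one unit away:
    >>> from scipy.spatial import KDTree
    >>> tree = KDTree([[0, 0], [1, 0], [2, 0]])
    >>> tree.query([3, 0])
    (1.0, 2)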
"""
def __init__(self, data, leafsize=10):
self.data = np.asarray(data)
self.n, self.m = np.shape(self.data)
self.leafsize = int(leafsize)
if self.leafsize < 1:
raise ValueError("leafsize must be at least 1")
self.maxes = np.amax(self.data,axis=0)
self.mins = np.amin(self.data,axis=0)
self.tree = self.__build(np.arange(self.n), self.maxes, self.mins)
class node(object):
if sys.version_info[0] >= 3:
def __lt__(self, other):
return id(self) < id(other)
def __gt__(self, other):
return id(self) > id(other)
def __le__(self, other):
return id(self) <= id(other)
def __ge__(self, other):
return id(self) >= id(other)
def __eq__(self, other):
return id(self) == id(other)
class leafnode(node):
def __init__(self, idx):
self.idx = idx
self.children = len(idx)
class innernode(node):
def __init__(self, split_dim, split, less, greater):
self.split_dim = split_dim
self.split = split
self.less = less
self.greater = greater
self.children = less.children+greater.children
def __build(self, idx, maxes, mins):
if len(idx) <= self.leafsize:
return KDTree.leafnode(idx)
else:
data = self.data[idx]
# maxes = np.amax(data,axis=0)
# mins = np.amin(data,axis=0)
d = np.argmax(maxes-mins)
maxval = maxes[d]
minval = mins[d]
if maxval == minval:
# all points are identical; warn user?
return KDTree.leafnode(idx)
data = data[:,d]
# sliding midpoint rule; see Maneewongvatana and Mount 1999
# for arguments that this is a good idea.
split = (maxval+minval)/2
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(less_idx) == 0:
split = np.amin(data)
less_idx = np.nonzero(data <= split)[0]
greater_idx = np.nonzero(data > split)[0]
if len(greater_idx) == 0:
split = np.amax(data)
less_idx = np.nonzero(data < split)[0]
greater_idx = np.nonzero(data >= split)[0]
if len(less_idx) == 0:
# _still_ zero? all must have the same value
if not np.all(data == data[0]):
raise ValueError("Troublesome data array: %s" % data)
split = data[0]
less_idx = np.arange(len(data)-1)
greater_idx = np.array([len(data)-1])
lessmaxes = np.copy(maxes)
lessmaxes[d] = split
greatermins = np.copy(mins)
greatermins[d] = split
return KDTree.innernode(d, split,
self.__build(idx[less_idx],lessmaxes,mins),
self.__build(idx[greater_idx],maxes,greatermins))
def __query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
side_distances = np.maximum(0,np.maximum(x-self.maxes,self.mins-x))
if p != np.inf:
side_distances **= p
min_distance = np.sum(side_distances)
else:
min_distance = np.amax(side_distances)
# priority queue for chasing nodes
# entries are:
# minimum distance between the cell and the target
# distances between the nearest side of the cell and the target
# the head node of the cell
q = [(min_distance,
tuple(side_distances),
self.tree)]
# priority queue for the nearest neighbors
# furthest known neighbor first
# entries are (-distance**p, i)
neighbors = []
if eps == 0:
epsfac = 1
elif p == np.inf:
epsfac = 1/(1+eps)
else:
epsfac = 1/(1+eps)**p
if p != np.inf and distance_upper_bound != np.inf:
distance_upper_bound = distance_upper_bound**p
while q:
min_distance, side_distances, node = heappop(q)
if isinstance(node, KDTree.leafnode):
# brute-force
data = self.data[node.idx]
ds = minkowski_distance_p(data,x[np.newaxis,:],p)
for i in range(len(ds)):
if ds[i] < distance_upper_bound:
if len(neighbors) == k:
heappop(neighbors)
heappush(neighbors, (-ds[i], node.idx[i]))
if len(neighbors) == k:
distance_upper_bound = -neighbors[0][0]
else:
# we don't push cells that are too far onto the queue at all,
# but since the distance_upper_bound decreases, we might get
# here even if the cell's too far
if min_distance > distance_upper_bound*epsfac:
# since this is the nearest cell, we're done, bail out
break
# compute minimum distances to the children and push them on
if x[node.split_dim] < node.split:
near, far = node.less, node.greater
else:
near, far = node.greater, node.less
# near child is at the same distance as the current node
heappush(q,(min_distance, side_distances, near))
# far child is further by an amount depending only
# on the split value
sd = list(side_distances)
if p == np.inf:
min_distance = max(min_distance, abs(node.split-x[node.split_dim]))
elif p == 1:
sd[node.split_dim] = np.abs(node.split-x[node.split_dim])
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
else:
sd[node.split_dim] = np.abs(node.split-x[node.split_dim])**p
min_distance = min_distance - side_distances[node.split_dim] + sd[node.split_dim]
# far child might be too far, if so, don't bother pushing it
if min_distance <= distance_upper_bound*epsfac:
heappush(q,(min_distance, tuple(sd), far))
if p == np.inf:
return sorted([(-d,i) for (d,i) in neighbors])
else:
return sorted([((-d)**(1./p),i) for (d,i) in neighbors])
def query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf):
"""
Query the kd-tree for nearest neighbors
Parameters
----------
x : array_like, last dimension self.m
An array of points to query.
k : int, optional
The number of nearest neighbors to return.
eps : nonnegative float, optional
Return approximate nearest neighbors; the kth returned value
is guaranteed to be no further than (1+eps) times the
distance to the real kth nearest neighbor.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use.
1 is the sum-of-absolute-values "Manhattan" distance
2 is the usual Euclidean distance
infinity is the maximum-coordinate-difference distance
distance_upper_bound : nonnegative float, optional
Return only neighbors within this distance. This is used to prune
tree searches, so if you are doing a series of nearest-neighbor
queries, it may help to supply the distance to the nearest neighbor
of the most recent point.
Returns
-------
d : float or array of floats
The distances to the nearest neighbors.
If x has shape tuple+(self.m,), then d has shape tuple if
k is one, or tuple+(k,) if k is larger than one. Missing
neighbors (e.g. when k > n or distance_upper_bound is
given) are indicated with infinite distances. If k is None,
then d is an object array of shape tuple, containing lists
of distances. In either case the hits are sorted by distance
(nearest first).
i : integer or array of integers
The locations of the neighbors in self.data. i is the same
shape as d.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:5, 2:8]
>>> tree = spatial.KDTree(list(zip(x.ravel(), y.ravel())))
>>> tree.data
array([[0, 2],
[0, 3],
[0, 4],
[0, 5],
[0, 6],
[0, 7],
[1, 2],
[1, 3],
[1, 4],
[1, 5],
[1, 6],
[1, 7],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 6],
[2, 7],
[3, 2],
[3, 3],
[3, 4],
[3, 5],
[3, 6],
[3, 7],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6],
[4, 7]])
>>> pts = np.array([[0, 0], [2.1, 2.9]])
>>> tree.query(pts)
(array([ 2. , 0.14142136]), array([ 0, 13]))
>>> tree.query(pts[0])
(2.0, 0)
"""
x = np.asarray(x)
if np.shape(x)[-1] != self.m:
raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
if p < 1:
raise ValueError("Only p-norms with 1<=p<=infinity permitted")
retshape = np.shape(x)[:-1]
if retshape != ():
if k is None:
dd = np.empty(retshape,dtype=np.object)
ii = np.empty(retshape,dtype=np.object)
elif k > 1:
dd = np.empty(retshape+(k,),dtype=np.float)
dd.fill(np.inf)
ii = np.empty(retshape+(k,),dtype=np.int)
ii.fill(self.n)
elif k == 1:
dd = np.empty(retshape,dtype=np.float)
dd.fill(np.inf)
ii = np.empty(retshape,dtype=np.int)
ii.fill(self.n)
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
for c in np.ndindex(retshape):
hits = self.__query(x[c], k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
dd[c] = [d for (d,i) in hits]
ii[c] = [i for (d,i) in hits]
elif k > 1:
for j in range(len(hits)):
dd[c+(j,)], ii[c+(j,)] = hits[j]
elif k == 1:
if len(hits) > 0:
dd[c], ii[c] = hits[0]
else:
dd[c] = np.inf
ii[c] = self.n
return dd, ii
else:
hits = self.__query(x, k=k, eps=eps, p=p, distance_upper_bound=distance_upper_bound)
if k is None:
return [d for (d,i) in hits], [i for (d,i) in hits]
elif k == 1:
if len(hits) > 0:
return hits[0]
else:
return np.inf, self.n
elif k > 1:
dd = np.empty(k,dtype=np.float)
dd.fill(np.inf)
ii = np.empty(k,dtype=np.int)
ii.fill(self.n)
for j in range(len(hits)):
dd[j], ii[j] = hits[j]
return dd, ii
else:
raise ValueError("Requested %s nearest neighbors; acceptable numbers are integers greater than or equal to one, or None")
def __query_ball_point(self, x, r, p=2., eps=0):
R = Rectangle(self.maxes, self.mins)
def traverse_checking(node, rect):
if rect.min_distance_point(x, p) > r / (1. + eps):
return []
elif rect.max_distance_point(x, p) < r * (1. + eps):
return traverse_no_checking(node)
elif isinstance(node, KDTree.leafnode):
d = self.data[node.idx]
return node.idx[minkowski_distance(d, x, p) <= r].tolist()
else:
less, greater = rect.split(node.split_dim, node.split)
return traverse_checking(node.less, less) + \
traverse_checking(node.greater, greater)
def traverse_no_checking(node):
if isinstance(node, KDTree.leafnode):
return node.idx.tolist()
else:
return traverse_no_checking(node.less) + \
traverse_no_checking(node.greater)
return traverse_checking(self.tree, R)
def query_ball_point(self, x, r, p=2., eps=0):
"""Find all points within distance r of point(s) x.
Parameters
----------
x : array_like, shape tuple + (self.m,)
The point or points to search for neighbors of.
r : positive float
The radius of points to return.
p : float, optional
Which Minkowski p-norm to use. Should be in the range [1, inf].
eps : nonnegative float, optional
Approximate search. Branches of the tree are not explored if their
nearest points are further than ``r / (1 + eps)``, and branches are
added in bulk if their furthest points are nearer than
``r * (1 + eps)``.
Returns
-------
results : list or array of lists
If `x` is a single point, returns a list of the indices of the
neighbors of `x`. If `x` is an array of points, returns an object
array of shape tuple containing lists of neighbors.
Notes
-----
If you have many points whose neighbors you want to find, you may save
substantial amounts of time by putting them in a KDTree and using
query_ball_tree.
Examples
--------
>>> from scipy import spatial
>>> x, y = np.mgrid[0:4, 0:4]
>>> points = zip(x.ravel(), y.ravel())
>>> tree = spatial.KDTree(points)
>>> tree.query_ball_point([2, 0], 1)
[4, 8, 9, 12]
"""
x = np.asarray(x)
if x.shape[-1] != self.m:
raise ValueError("Searching for a %d-dimensional point in a "
"%d-dimensional KDTree" % (x.shape[-1], self.m))
if len(x.shape) == 1:
return self.__query_ball_point(x, r, p, eps)
else:
retshape = x.shape[:-1]
result = np.empty(retshape, dtype=np.object)
for c in np.ndindex(retshape):
result[c] = self.__query_ball_point(x[c], r, p=p, eps=eps)
return result
def query_ball_tree(self, other, r, p=2., eps=0):
"""Find all pairs of points whose distance is at most r
Parameters
----------
other : KDTree instance
The tree containing points to search against.
r : float
The maximum distance, has to be positive.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : list of lists
For each element ``self.data[i]`` of this tree, ``results[i]`` is a
list of the indices of its neighbors in ``other.data``.
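        Examples
        --------
        For each point of the first tree, list its neighbors in the second:
        >>> from scipy import spatial
        >>> tree1 = spatial.KDTree([[0, 0], [1, 1], [2, 2]])
        >>> tree2 = spatial.KDTree([[0, 0.2], [1.9, 2.1]])
        >>> tree1.query_ball_tree(tree2, r=0.5)
        [[0], [], [1]]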
"""
results = [[] for i in range(self.n)]
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
d = other.data[node2.idx]
for i in node1.idx:
results[i] += node2.idx[minkowski_distance(d,self.data[i],p) <= r].tolist()
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
results[i] += node2.idx.tolist()
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return results
def query_pairs(self, r, p=2., eps=0):
"""
Find all pairs of points within a distance.
Parameters
----------
r : positive float
The maximum distance.
p : float, optional
Which Minkowski norm to use. `p` has to meet the condition
``1 <= p <= infinity``.
eps : float, optional
Approximate search. Branches of the tree are not explored
if their nearest points are further than ``r/(1+eps)``, and
branches are added in bulk if their furthest points are nearer
than ``r * (1+eps)``. `eps` has to be non-negative.
Returns
-------
results : set
Set of pairs ``(i,j)``, with ``i < j``, for which the corresponding
positions are close.
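        Examples
        --------
        Only the two points one unit apart form a pair within r=1.5:
        >>> from scipy import spatial
        >>> tree = spatial.KDTree([[0, 0], [0, 1], [5, 5]])
        >>> sorted(tree.query_pairs(1.5))
        [(0, 1)]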
"""
results = set()
def traverse_checking(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > r/(1.+eps):
return
elif rect1.max_distance_rectangle(rect2, p) < r*(1.+eps):
traverse_no_checking(node1, node2)
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
else:
d = self.data[node2.idx]
for i in node1.idx:
for j in node2.idx[minkowski_distance(d,self.data[i],p) <= r]:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1,rect1,node2.less,less)
traverse_checking(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse_checking(node1.less,less,node2,rect2)
traverse_checking(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse_checking(node1.less,less1,node2.less,less2)
traverse_checking(node1.less,less1,node2.greater,greater2)
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) != id(node2):
traverse_checking(node1.greater,greater1,node2.less,less2)
traverse_checking(node1.greater,greater1,node2.greater,greater2)
def traverse_no_checking(node1, node2):
if isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
# Special care to avoid duplicate pairs
if id(node1) == id(node2):
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
else:
for i in node1.idx:
for j in node2.idx:
if i < j:
results.add((i,j))
elif j < i:
results.add((j,i))
else:
traverse_no_checking(node1, node2.less)
traverse_no_checking(node1, node2.greater)
else:
# Avoid traversing (node1.less, node2.greater) and
# (node1.greater, node2.less) (it's the same node pair twice
# over, which is the source of the complication in the
# original KDTree.query_pairs)
if id(node1) == id(node2):
traverse_no_checking(node1.less, node2.less)
traverse_no_checking(node1.less, node2.greater)
traverse_no_checking(node1.greater, node2.greater)
else:
traverse_no_checking(node1.less, node2)
traverse_no_checking(node1.greater, node2)
traverse_checking(self.tree, Rectangle(self.maxes, self.mins),
self.tree, Rectangle(self.maxes, self.mins))
return results
def count_neighbors(self, other, r, p=2.):
"""
Count how many nearby pairs can be formed.
        Count the number of pairs ``(x1, x2)`` that can be formed, with ``x1``
        drawn from self and ``x2`` drawn from `other`, and where
        ``distance(x1, x2, p) <= r``.
This is the "two-point correlation" described in Gray and Moore 2000,
"N-body problems in statistical learning", and the code here is based
on their algorithm.
Parameters
----------
other : KDTree instance
The other tree to draw points from.
r : float or one-dimensional array of floats
The radius to produce a count for. Multiple radii are searched with
a single tree traversal.
p : float, 1<=p<=infinity, optional
Which Minkowski p-norm to use
Returns
-------
result : int or 1-D array of ints
The number of pairs. Note that this is internally stored in a numpy
int, and so may overflow if very large (2e9).
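        Examples
        --------
        A minimal usage sketch; the points below are illustrative only.

        >>> from scipy.spatial import KDTree
        >>> t1 = KDTree([[0, 0]])
        >>> t2 = KDTree([[0, 0.1], [0, 0.4], [3, 3]])
        >>> t1.count_neighbors(t2, r=0.5)
        2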
"""
def traverse(node1, rect1, node2, rect2, idx):
min_r = rect1.min_distance_rectangle(rect2,p)
max_r = rect1.max_distance_rectangle(rect2,p)
c_greater = r[idx] > max_r
result[idx[c_greater]] += node1.children*node2.children
idx = idx[(min_r <= r[idx]) & (r[idx] <= max_r)]
if len(idx) == 0:
return
if isinstance(node1,KDTree.leafnode):
if isinstance(node2,KDTree.leafnode):
ds = minkowski_distance(self.data[node1.idx][:,np.newaxis,:],
other.data[node2.idx][np.newaxis,:,:],
p).ravel()
ds.sort()
result[idx] += np.searchsorted(ds,r[idx],side='right')
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1, rect1, node2.less, less, idx)
traverse(node1, rect1, node2.greater, greater, idx)
else:
if isinstance(node2,KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less, less, node2, rect2, idx)
traverse(node1.greater, greater, node2, rect2, idx)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less,less1,node2.less,less2,idx)
traverse(node1.less,less1,node2.greater,greater2,idx)
traverse(node1.greater,greater1,node2.less,less2,idx)
traverse(node1.greater,greater1,node2.greater,greater2,idx)
R1 = Rectangle(self.maxes, self.mins)
R2 = Rectangle(other.maxes, other.mins)
if np.shape(r) == ():
r = np.array([r])
result = np.zeros(1,dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(1))
return result[0]
elif len(np.shape(r)) == 1:
r = np.asarray(r)
n, = r.shape
result = np.zeros(n,dtype=int)
traverse(self.tree, R1, other.tree, R2, np.arange(n))
return result
else:
raise ValueError("r must be either a single value or a one-dimensional array of values")
def sparse_distance_matrix(self, other, max_distance, p=2.):
"""
Compute a sparse distance matrix
Computes a distance matrix between two KDTrees, leaving as zero
any distance greater than max_distance.
Parameters
----------
other : KDTree
max_distance : positive float
p : float, optional
Returns
-------
result : dok_matrix
Sparse matrix representing the results in "dictionary of keys" format.
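        Examples
        --------
        A minimal usage sketch; the points below are illustrative only.

        >>> from scipy.spatial import KDTree
        >>> t1 = KDTree([[0, 0], [1, 1]])
        >>> t2 = KDTree([[0, 0.5]])
        >>> sdm = t1.sparse_distance_matrix(t2, max_distance=1.0)
        >>> sdm[0, 0]
        0.5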
"""
result = scipy.sparse.dok_matrix((self.n,other.n))
def traverse(node1, rect1, node2, rect2):
if rect1.min_distance_rectangle(rect2, p) > max_distance:
return
elif isinstance(node1, KDTree.leafnode):
if isinstance(node2, KDTree.leafnode):
for i in node1.idx:
for j in node2.idx:
d = minkowski_distance(self.data[i],other.data[j],p)
if d <= max_distance:
result[i,j] = d
else:
less, greater = rect2.split(node2.split_dim, node2.split)
traverse(node1,rect1,node2.less,less)
traverse(node1,rect1,node2.greater,greater)
elif isinstance(node2, KDTree.leafnode):
less, greater = rect1.split(node1.split_dim, node1.split)
traverse(node1.less,less,node2,rect2)
traverse(node1.greater,greater,node2,rect2)
else:
less1, greater1 = rect1.split(node1.split_dim, node1.split)
less2, greater2 = rect2.split(node2.split_dim, node2.split)
traverse(node1.less,less1,node2.less,less2)
traverse(node1.less,less1,node2.greater,greater2)
traverse(node1.greater,greater1,node2.less,less2)
traverse(node1.greater,greater1,node2.greater,greater2)
traverse(self.tree, Rectangle(self.maxes, self.mins),
other.tree, Rectangle(other.maxes, other.mins))
return result
def distance_matrix(x, y, p=2, threshold=1000000):
"""
Compute the distance matrix.
Returns the matrix of all pair-wise distances.
Parameters
----------
x : (M, K) array_like
        Matrix of M vectors in K dimensions.
y : (N, K) array_like
        Matrix of N vectors in K dimensions.
p : float, 1 <= p <= infinity
Which Minkowski p-norm to use.
threshold : positive int
If ``M * N * K`` > `threshold`, algorithm uses a Python loop instead
of large temporary arrays.
Returns
-------
result : (M, N) ndarray
Distance matrix.
Examples
--------
>>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]])
    array([[ 1.        ,  1.41421356],
           [ 1.41421356,  1.        ]])
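
    Forcing the loop-based evaluation by lowering `threshold` gives the same
    values (an illustrative sketch):

    >>> distance_matrix([[0,0],[0,1]], [[1,0],[1,1]], threshold=1)
    array([[ 1.        ,  1.41421356],
           [ 1.41421356,  1.        ]])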
"""
x = np.asarray(x)
m, k = x.shape
y = np.asarray(y)
n, kk = y.shape
if k != kk:
raise ValueError("x contains %d-dimensional vectors but y contains %d-dimensional vectors" % (k, kk))
if m*n*k <= threshold:
return minkowski_distance(x[:,np.newaxis,:],y[np.newaxis,:,:],p)
else:
        result = np.empty((m,n),dtype=np.float64)  # FIXME: figure out the best dtype
if m < n:
for i in range(m):
result[i,:] = minkowski_distance(x[i],y,p)
else:
for j in range(n):
result[:,j] = minkowski_distance(x,y[j],p)
return result
| bsd-3-clause | 49,220,198,291,345,260 | 37.58781 | 137 | 0.524135 | false |
epuzanov/ZenPacks.community.HPMon | ZenPacks/community/HPMon/modeler/plugins/community/snmp/HPDaLogDrvMap.py | 1 | 2623 | ################################################################################
#
# This program is part of the HPMon Zenpack for Zenoss.
# Copyright (C) 2008-2012 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""HPDaLogDrvMap
HPDaLogDrvMap maps the cpqDaLogDrvTable to disk objects
$Id: HPDaLogDrvMap.py,v 1.4 2012/10/27 16:34:20 egor Exp $"""
__version__ = '$Revision: 1.4 $'[11:-2]
from Products.DataCollector.plugins.CollectorPlugin import GetTableMap
from HPLogicalDiskMap import HPLogicalDiskMap
class HPDaLogDrvMap(HPLogicalDiskMap):
"""Map HP/Compaq insight manager DA Logical Disk tables to model."""
maptype = "HPDaLogDrvMap"
modname = "ZenPacks.community.HPMon.cpqDaLogDrv"
snmpGetTableMaps = (
GetTableMap('cpqDaLogDrvTable',
'.1.3.6.1.4.1.232.3.2.3.1.1',
{
'.3': 'diskType',
'.4': 'status',
'.9': 'size',
'.13': 'stripesize',
'.14': 'description',
}
),
)
diskTypes = {1: 'other',
2: 'RAID0',
3: 'RAID1',
4: 'RAID10',
5: 'RAID5',
6: 'RAID1E',
7: 'RAID6',
8: 'RAID50',
9: 'RAID60',
10: 'RAID1 ADM',
11: 'RAID10 ADM',
}
def process(self, device, results, log):
"""collect snmp information from this device"""
log.info('processing %s for device %s', self.name(), device.id)
getdata, tabledata = results
if not device.id in HPLogicalDiskMap.oms:
HPLogicalDiskMap.oms[device.id] = []
for oid, disk in tabledata.get('cpqDaLogDrvTable', {}).iteritems():
try:
om = self.objectMap(disk)
om.snmpindex = oid.strip('.')
om.id=self.prepId("LogicalDisk%s"%om.snmpindex).replace('.','_')
                raw_type = getattr(om, 'diskType', 1)
                om.diskType = self.diskTypes.get(raw_type,
                                    '%s (%d)' % (self.diskTypes[1], raw_type))
om.stripesize = getattr(om, 'stripesize', 0) * 1024
om.size = getattr(om, 'size', 0) * 1048576
except AttributeError:
continue
HPLogicalDiskMap.oms[device.id].append(om)
return
| gpl-2.0 | 4,412,742,039,139,107,000 | 35.430556 | 80 | 0.487228 | false |
singleswitch/ticker | ticker.py | 1 | 31018 | #! /usr/bin/env python
import sys, os,time, shutil, copy
import numpy as np
from PyQt4 import QtCore, QtGui,QtNetwork
from ticker_layout import Ui_MainWindow
from ticker_widgets import ChannelVisualisation, AlphabetLabel, ClickGraphScene
from ticker_widgets import DictionaryDisplay,InstructionsDisplay,SentenceDisplay
from ticker_audio import Audio
from channel_config import ChannelConfig
from ticker_core import TickerCore
from settings_editor import VolumeEditWidget, SettingsEditWidget
from utils import Utils
from click_distr import ClickDistribution
class TickerGui(QtGui.QMainWindow, Ui_MainWindow):
##################################### Init
def __init__(self, i_settings_dir="./settings/", i_settings_file="settings.cPickle"):
t=time.time()
QtGui.QMainWindow.__init__(self)
self.setupUi(self)
self.utils = Utils()
#######################################################################
#Widget instantiation
#######################################################################
self.settings_editor = SettingsEditWidget(self.centralwidget)
channel_index = self.settings_editor.box_channels.currentIndex()
overlap = self.getAudioOverlap()
self.cur_dir = os.path.dirname(os.path.abspath(__file__)) + "/"
file_length = self.settings_editor.box_file_length.value()
self.channel_config = ChannelConfig(channel_index + 1, overlap, file_length, self.cur_dir)
self.settings_editor.hide()
self.alphabet_label_letter_likelihoods = AlphabetLabel(self.label_letter_likelihoods)
self.letter_likelihood_display = ChannelVisualisation( self.alphabet_likelihood_view, self.centralwidget, self.alphabet_label_letter_likelihoods )
self.channel_names_display = ChannelVisualisation( self.channel_names, self.centralwidget )
self.alphabet_display = ChannelVisualisation(self.alphabet_view, self.centralwidget )
self.click_pdf_display = ClickGraphScene( parent=self.click_distribution_view,
i_title=self.label_click_distribution,
xlabel="click time (seconds)", ylabel="P(letter | click time)")
self.audio = Audio()
self.click_pdf = ClickDistribution()
word_thresh = self.settings_editor.box_word_select.value()
self.ticker_core = TickerCore(word_thresh, self.cur_dir + "dictionaries/nomon_dict.txt")
self.dictionary_display = DictionaryDisplay( self.best_words_disp, self.centralwidget, i_title=None)
self.instructions_display = InstructionsDisplay(self.label_instructions, self.centralwidget, i_title=None)
self.sentence_display = SentenceDisplay(self.selected_words_disp, self.centralwidget, i_title=None)
        #Main timer calls updateAudio
self.main_timer = QtCore.QTimer()
#Best timer: after the alphabet has been played to the user, this time interval will pass before starting again
self.best_letters_timer = QtCore.QTimer()
#Give some time before continuing to the next letter
self.best_letters_timer.setInterval(1000*self.settings_editor.box_end_delay.value())
self.best_letters_timer.setSingleShot(True)
self.volume_editor = VolumeEditWidget(self.centralwidget)
self.volume_editor.setChannelConfig(self.channel_config)
        #Hide/show widgets
self.initManualHideFlags()
self.volume_editor.hide()
self.hidePhrases()
self.actionAlphabet(False)
#######################################################################
#Complete initialisation of separate components, and connects all signals
#######################################################################
self.label_letter_speed.setText(QtCore.QString("Speed: %0.2f" % overlap))
        #Keyboard clicks - if False control happens via something else e.g., software port
self.enable_clicks = True
        #The number of alphabet repetitions
self.repeat_count = 0
#Calibrate variables
self.calibration_word = "yes_"
self.calibrated = False
self.manual_calibration = False #Change calibration mode flag manually (not through button press)
#Pause/play
self.restart = True
##########################################################################
#The port settings
##########################################################################
self.socket = QtNetwork.QUdpSocket()
is_socket = self.socket.bind(QtNetwork.QHostAddress(0),20320)
if not is_socket:
print "Binding of socket was unsuccessful"
else:
QtCore.QObject.connect(self.socket, QtCore.SIGNAL("readyRead()"), self.readPendingDatagrams)
##########################################################################
#Start main
##########################################################################
self.initSettings(i_settings_dir, i_settings_file)
self.__connectSignals()
#Start the application by pretending to close the settings editor which resets everything
self.setFastMode(self.action_fast_mode.isChecked())
self.setAudioOverlap()
self.initSize()
print "Total startup time, ticker = ", time.time() - t, " seconds " #Connect signal/slots for all the action items
#Reset everything - clickdistr estc
def reset(self):
self.audio.stop()
self.best_letters_timer.setInterval(1000*self.settings_editor.box_end_delay.value())
file_length = self.settings_editor.box_file_length.value()
nchannels = self.settings_editor.getCurrentChannel()
print "Calling set channel: ", nchannels, " file length = ", file_length, " wait time = ", self.best_letters_timer.interval()
self.calibrated = False
self.clearAll()
self.channel_config.setChannels(nchannels, file_length)
self.audio.setChannels(nchannels)
alphabet = self.channel_config.getAlphabetLoader().getUniqueAlphabet( i_with_spaces=True, i_group=True)
self.letter_likelihood_display.setChannels(alphabet)
alphabet = self.channel_config.getChannelNames()
self.channel_names_display.setChannels(alphabet)
alphabet = self.channel_config.getAlphabetLoader().getAlphabet( i_with_spaces=True, i_group=True)
self.alphabet_display.setChannels( alphabet)
self.setClickDistrParams()
self.ticker_core.setClickDistr(self.click_pdf)
self.initDisplayForNewWord()
#The volume
self.volume_editor.setChannelConfig(self.channel_config)
#Make sure the volumes are set correctly
for channel in range(0, nchannels):
self.setVolume(self.volume_editor.getVolume(channel), channel)
def initDisplayForNewWord(self):
(words, word_probs) = self.ticker_core.getBestWordProbs(16)
self.dictionary_display.update(words,word_probs)
self.setInstructLetterSelectStr()
self.letter_likelihood_display.clear()
self.drawClickDistribution()
def initSize(self):
width = 1100
if (not self.alphabet_hidden) and (not self.phrases_hidden):
self.resize(width, 830)
elif not self.alphabet_hidden:
self.resize(width, 730)
elif not self.phrases_hidden:
self.resize(width, 590)
else:
self.resize(width, 530)
def initManualHideFlags(self):
self.phrases_hidden = True
self.alphabet_hidden = True
def initSettings(self, i_settings_dir, i_settings_file):
#Save the settings to default file
self.settings_dir = i_settings_dir
self.default_file = self.settings_dir + "default_settings.cPickle"
self.setClickDistrParams()
self.ticker_core.setClickDistr(self.click_pdf)
self.click_pdf.initHistogram()
self.saveSettings(self.default_file)
self.settings_file = i_settings_dir + i_settings_file
if os.path.exists(self.settings_file):
self.loadSettings(self.settings_file)
def initGaussDistribution(self):
self.setClickDistrParams()
self.click_pdf.initHistogram()
self.drawClickDistribution()
def __connectSignals(self):
#Menubar actions
QtCore.QObject.connect( self.action_settings, QtCore.SIGNAL("triggered(bool)"), self.actionSettings)
QtCore.QObject.connect( self.action_dictionary, QtCore.SIGNAL("triggered(bool)"), self.actionEditDictionary)
QtCore.QObject.connect( self.action_close, QtCore.SIGNAL("triggered(bool)"), self.actionCloseApplication)
QtCore.QObject.connect( self.action_clear, QtCore.SIGNAL("triggered(bool)"), self.actionClear)
QtCore.QObject.connect( self.action_alphabet, QtCore.SIGNAL("toggled(bool)"), self.actionAlphabet)
QtCore.QObject.connect( self.alphabet_widget_display, QtCore.SIGNAL("visibilityChanged(bool)"), self.actionCloseAlphabet)
QtCore.QObject.connect( self.action_volume, QtCore.SIGNAL("triggered(bool)"), self.actionVolume)
QtCore.QObject.connect( self.action_calibrate, QtCore.SIGNAL("toggled(bool)"), self.setCalibration)
QtCore.QObject.connect( self.action_fast_mode, QtCore.SIGNAL("toggled(bool)"), self.setFastMode)
QtCore.QObject.connect( self.action_practise, QtCore.SIGNAL("toggled(bool)"), self.setPractise)
QtCore.QObject.connect( self.action_open, QtCore.SIGNAL("triggered(bool)"), self.loadSettingsDialog)
QtCore.QObject.connect( self.action_save, QtCore.SIGNAL("triggered(bool)"), self.saveSettingsDialog)
#Speed scrollbar
QtCore.QObject.connect( self.scrollbar_letter_speed, QtCore.SIGNAL("sliderReleased()"), self.setAudioOverlap )
QtCore.QObject.connect( self.scrollbar_letter_speed, QtCore.SIGNAL("sliderMoved(int)"), self.setSliderLabel )
#Start/stop/pause
QtCore.QObject.connect( self.clear_button, QtCore.SIGNAL("clicked(bool)"), self.startSoundFalse )
#Pause/unpause
QtCore.QObject.connect( self.button_pause, QtCore.SIGNAL("clicked(bool)"), self.pauseSlot )
#Timers
QtCore.QObject.connect( self.main_timer, QtCore.SIGNAL("timeout()"), self.updateAudio)
QtCore.QObject.connect( self.best_letters_timer, QtCore.SIGNAL("timeout()"), self.processClicks)
#Volume editor
QtCore.QObject.connect( self.volume_editor, QtCore.SIGNAL("volume(float,int)"), self.setVolume)
#Settings editor - on closing the settings Ticker registers it
QtCore.QObject.connect( self.settings_editor, QtCore.SIGNAL("close_settings"), self.reset)
QtCore.QObject.connect( self.settings_editor, QtCore.SIGNAL("edit_click_params"), self.drawClickDistribution)
QtCore.QObject.connect( self.settings_editor.button_default, QtCore.SIGNAL("released()"), self.loadDefaultSettings)
QtCore.QObject.connect( self.settings_editor.button_gauss, QtCore.SIGNAL("released()"), self.initGaussDistribution)
##################################### Main functions
def processClicks(self):
self.audio.clear()
#No clicks received
if not self.ticker_core.clicksReceived():
            self.processAlphabetRepetitions()
return
#Clicks were received - process it
self.repeat_count = 0
selected_word = self.processWordSelections()
if selected_word is not None:
#Update values in settings editor
self.settings_editor.setClickParams(self.settings_editor.clickPdfToSettingsParams(self.click_pdf.getParams()))
return
self.updateNextLetter()
#Call this function if the click has to be processed
def processClick(self):
is_ready = not self.audio.isPlayingInstructions()
if not is_ready:
is_ready = self.audio.isReady()
if not is_ready:
return False
click_time = self.audio.getTime(self.channel_config)
print "In Ticker, click received, click_time = ", click_time
click_log_scores = np.array(self.ticker_core.newClick(np.float64(click_time)))
n_clicks = self.ticker_core.getNumberClicks()
#Undo
if n_clicks >= self.settings_editor.box_undo.value():
self.undoLastLetter()
return True
        #Not an undo - process the click scores
click_log_sum = self.utils.expTrick( click_log_scores.reshape([1, len(click_log_scores) ]) )[0]
alpha_weights = np.exp(click_log_scores - click_log_sum)
self.letter_likelihood_display.setAlphaWeights(alpha_weights)
if self.action_practise.isChecked():
print "************************************************************"
print "Letter scores"
print "************************************************************"
for (n, letter) in enumerate(self.ticker_core.click_distr.alphabet):
ltimes = self.ticker_core.click_distr.loc[n,:]
print "%s prob=%1.3f, log_score=%2.3f" % (letter,alpha_weights[n], click_log_scores[n]),
print " click time=%1.3f, letter_time=(%2.3f,%2.3f)" % (click_time,ltimes[0],ltimes[1]),
print " delta=(%2.3f,%2.3f)" % (click_time-ltimes[0],click_time-ltimes[1])
return False
def undoLastLetter(self, i_play_audio=True):
self.ticker_core.undoLastLetter()
self.repeat_count = 0
self.audio.restart()
self.letter_likelihood_display.clear()
if i_play_audio:
cmd_str = ["undo", "repeat"]
self.playCurrentLetterIndex(cmd_str)
def playCurrentLetterIndex(self, i_cmd_str=[]):
cmd_str = list(i_cmd_str)
letter_idx = self.instructions_display.letter_dict[self.ticker_core.getLetterIndex() + 1]
cmd_str.extend(letter_idx.split(" "))
cmd_str.append("letter")
self.audio.playInstructions(cmd_str)
def updateNextLetter(self):
"""Update if selected word is None, proceeding to the next letter."""
(words, word_probs) = self.ticker_core.getBestWordProbs(10)
self.dictionary_display.update(words,word_probs)
self.setInstructLetterSelectStr()
self.letter_likelihood_display.clear()
self.audio.playInstructions(["next"])
def processWordSelections(self):
#Check if we're busy with the calibration
is_calibrated = (not self.action_calibrate.isChecked()) or self.calibrated
is_process = is_calibrated and (not self.action_practise.isChecked())
selected_word = self.ticker_core.newLetter(i_process_word_selections=is_process)
if self.action_practise.isChecked():
return
#Calibrating if process_word_selections = False
if not is_calibrated:
selected_word = self.processWordSelectCalibrating()
#No word was selected
if selected_word is None:
return
#A word was selected
if selected_word == ".":
self.sentence_display.update(selected_word)
else:
self.sentence_display.update(selected_word[0:-1])
#Don't play "new word" is a word was selected at calibration - this will be done when unchecking the calibration box
self.newWord(selected_word, i_is_word_selected=True, i_play_new_word=is_calibrated)
if not is_calibrated:
self.manual_calibration = True
self.action_calibrate.setChecked(False)
return selected_word
def processWordSelectCalibrating(self):
letter_idx = self.ticker_core.getLetterIndex()
        #only use the minimum number of alphabet repetitions (with clicks) to select
#the calibration word and initialise the click distribution with.
if letter_idx < len(self.calibration_word):
return
selected_word = str(self.calibration_word)
print "In Ticker process word selection trainClickDistrAndInit, selected_word = ", selected_word
self.ticker_core.trainClickDistrAndInitialise(selected_word)
self.calibrated = True
return selected_word
    def processAlphabetRepetitions(self):
if self.action_practise.isChecked():
return
#Process the number of times the alphabet sequence has been repeated when no clicks were received
self.repeat_count += 1
shut_down_repeat = self.settings_editor.box_shut_down.value()
word_repeat = self.settings_editor.box_restart_word.value()
prog_repeat = self.settings_editor.box_prog_status.value()
is_shut_down = self.repeat_count % shut_down_repeat
is_new_word = self.repeat_count % word_repeat
is_prog_status = self.repeat_count % prog_repeat
        if is_shut_down == 0:
            repetition = self.instructions_display.letter_dict[shut_down_repeat]
            cmd_str = (repetition + " repetition reached shutting down").split(" ")
self.audio.playInstructions(cmd_str)
t = time.time()
while self.audio.isPlayingInstructions():
if (time.time() - t) > 5:
break
if (time.time() - t) > 0.05:
self.audio.update(self.channel_config)
self.startSoundFalse()
        elif is_new_word == 0:
self.newWord(i_play_new_word=False)
self.audio.playInstructions(["undo", "restart", "word"])
        elif is_prog_status == 0:
self.playCurrentLetterIndex()
#else:
# self.audio.playInstructions(["beep"])
def newWord(self, i_extra_command=None, i_is_word_selected=True, i_play_new_word=True):
self.ticker_core.newWord()
self.initDisplayForNewWord()
if self.action_practise.isChecked():
return
if i_extra_command is not None:
if i_is_word_selected:
self.audio.synthesiseWord(i_extra_command)
else:
self.audio.synthesise(i_extra_command)
if i_play_new_word:
self.audio.playInstructions(self.newWordStr())
#These functions are update functions synchronised with the GUI timer
def updateAudio(self):
if self.best_letters_timer.isActive():
(is_read_next, is_update_time, is_first_letter) = self.audio.update(self.channel_config, i_loop=False)
return (is_read_next, is_update_time, is_first_letter)
(is_read_next, is_update_time, is_first_letter) = self.audio.update(self.channel_config)
if is_read_next and (not self.best_letters_timer.isActive()):
self.audio.readTime(self.channel_config)
self.best_letters_timer.start()
return (is_read_next, is_update_time, is_first_letter)
self.best_letters_timer.stop()
if self.alphabet_display.isVisible():
sound_index = self.audio.getSoundIndex(self.channel_config)
self.alphabet_display.setColumnFocus(sound_index)
return (is_read_next, is_update_time, is_first_letter)
def drawClickDistribution(self, i_gauss_params=None):
self.click_pdf_display.drawClickDistribution(self.click_pdf.getHistogramRects())
settings = self.settings_editor.getSettings()
self.click_pdf_display.setView(settings['delay'],settings['std'])
def waitAudioReady(self, i_commands=None):
if i_commands is not None:
self.audio.playInstructions(i_commands)
while self.audio.isPlayingInstructions() or (not self.audio.isReady()):
self.audio.update(self.channel_config)
######################################### Settings
def saveSettingsDialog(self):
self.startSoundFalse()
(disp_str, dir, files) = self.getSettingsDir()
filename = QtGui.QFileDialog.getSaveFileName( self, disp_str, dir , files )
print "SAVE SETTINGS, filename = ", filename
if len(filename) > 0:
self.saveSettings(filename)
def saveSettings(self, i_file):
#i_init_settings_backup=True: Initialising the settings
settings = dict(self.getSettings())
settings['click_pdf'] = copy.deepcopy(self.click_pdf)
print "SAVING SETTINGS"
print "d = ", settings['delay'], " std = ", settings['std'], " fr = ", settings['fr'], " ", settings['fp_rate']
self.utils.savePickle(settings,i_file)
def loadSettingsDialog(self):
self.startSoundFalse()
(disp_str, dir, files) = self.getSettingsDir()
filename = QtGui.QFileDialog.getOpenFileName( self,disp_str, dir , files )
if len(filename) > 0:
print "LOADNG SETTINGS, filename = ", filename
self.loadSettings(filename)
def loadDefaultSettings(self):
print "LOADING DEFAULT SETTTING"
self.loadSettings(self.default_file)
self.drawClickDistribution()
def loadSettings(self, i_file):
print "loading settings from filename = ", i_file
settings = self.utils.loadPickle(i_file)
print "GOT SETTINGS:"
print "delay = ", settings['delay'], " std = ", settings['std']
#Settings editor
self.settings_editor.setSettings(settings)
#The speed
self.scrollbar_letter_speed.setValue(int(settings['overlap']*100))
self.__setAudioOverlap()
self.best_letters_timer.setInterval(1000*self.settings_editor.box_end_delay.value())
print "SPEED = ", settings['overlap'], " WAIT TIME = ", self.best_letters_timer.interval()
#Mode
self.action_fast_mode.setChecked(settings['fast_mode'])
self.action_tutorial.setChecked(settings['tutorial'])
self.action_calibrate.setChecked(settings['calibrate'])
self.action_practise.setChecked(settings['practise'])
self.action_inc_phrases.setChecked(settings['inc phrases'])
#The click pdf
self.click_pdf = copy.deepcopy(settings['click_pdf'])
self.ticker_core.setClickDistr(self.click_pdf)
print "LOADED SETTINGS: click_distr params = "
click_params = self.click_pdf.getParams()
(delay, std, fr, fp_rate) = click_params
print "delay = ", delay, " std = ", std, " fr = ", fr, " fp rate = ", fp_rate
print "LOADED SETTINGS: settings params = "
s = self.settings_editor.getSettings()
print "delay = ", s['delay'], " std = ", s['std']
#################################### Start/Stop/Pause/Close
def pauseSlot(self, i_checked):
if i_checked:
self.pauseTrue()
if self.restart:
self.restart = False
self.startSoundTrue()
self.newWord()
self.main_timer.start(10)
else:
self.pauseFalse()
def pauseTrue(self, i_play_cur_letter_idx=True):
self.audio.setChannels(self.settings_editor.getCurrentChannel())
if i_play_cur_letter_idx:
self.playCurrentLetterIndex()
self.button_pause.setChecked(True)
self.button_pause.setText("Pause")
def pauseFalse(self, i_undo_last=True):
self.button_pause.setText("Play")
self.button_pause.setChecked(False)
self.stopTimers()
self.audio.stop()
if i_undo_last:
self.undoLastLetter(i_play_audio=False)
def startSoundTrue(self):
self.clearAll()
self.audio.restart()
def startSoundFalse(self):
self.clearAll()
self.audio.stop()
self.pauseFalse(i_undo_last=False)
self.restart = True
def closeEvent(self, event):
self.startSoundFalse()
while not self.audio.isReady():
continue
QtGui.QMainWindow.close(self)
def clearAll(self):
self.stopTimers()
self.repeat_count = 0
def stopTimers(self):
self.main_timer.stop()
self.best_letters_timer.stop()
#################################### Switch Events
def keyPressEvent(self, event):
if (event.key() == QtCore.Qt.Key_Space) and self.enable_clicks and (not self.button_pause.isChecked()):
self.pauseSlot(True)
if (not self.enable_clicks) or (not self.action_space_bar.isChecked()):
return
if event.key() == QtCore.Qt.Key_Space:
self.processClick()
#Enable/Disable Keyboard events
def enableClicks(self):
self.enable_clicks = True
def disableClicks(self):
self.enable_clicks = False
#Software port communication
def readPendingDatagrams(self):
if not self.action_port.isChecked():
return
while (self.socket.hasPendingDatagrams()):
max_len = self.socket.pendingDatagramSize()
(data, host, port) = self.socket.readDatagram (max_len)
for n in range(0, max_len):
self.processClick()
self.audio.playClick()
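    #Example (hypothetical sketch): an external switch process can inject clicks
    #by sending a UDP datagram to the port bound in __init__; one click is
    #registered per byte in the datagram.
    #  import socket
    #  sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    #  sock.sendto("c", ("127.0.0.1", 20320))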
##################################### Feedback Phrases
def setInstructLetterSelectStr(self):
if self.action_practise.isChecked():
return
letter_idx = self.ticker_core.getLetterIndex()
disp_str = str(self.instructions_display.getInstructSentence(letter_idx+1))
if (not self.calibrated) and self.action_calibrate.isChecked():
disp_str += (" " + self.calibration_word )
letter = self.calibration_word[letter_idx]
self.instructions_display.update(disp_str)
def newWordStr(self):
if (not self.calibrated) and self.action_calibrate.isChecked():
instruct_str = ["calibrating"]
else:
instruct_str = ["start"]
return instruct_str
##################################### Set functions
def setClickDistrParams(self):
s = self.settings_editor.getSettings()
self.click_pdf.setParams(s['is_train'], self.channel_config, s['delay'], s['std'], s['fp_rate'],
s['fr'], s['learning_rate'],s['end_delay'])
self.click_pdf.setFixLearning(s['learn_delay'], s['learn_std'], s['learn_fp'], s['learn_fr'])
def setSliderLabel(self, i_value):
overlap = "%.2f" % (i_value/100.0)
self.label_letter_speed.setText(QtCore.QString("Speed: %s" % overlap))
def setAudioOverlap(self):
self.startSoundFalse()
overlap = self.__setAudioOverlap()
file_length = self.settings_editor.box_file_length.value()
self.channel_config.setOverlap(overlap, file_length)
self.reset()
def __setAudioOverlap(self):
overlap = self.getAudioOverlap()
self.setSliderLabel(100.0 * overlap)
return overlap
def setVolume(self, i_val, i_channel):
self.audio.setVolume(i_val, i_channel)
def setCalibration(self, i_checked):
self.calibrated = not i_checked
if not self.manual_calibration:
self.startSoundFalse()
self.manual_calibration = False
def setPractise(self, i_checked):
self.startSoundFalse()
def setFastMode(self, i_checked):
self.startSoundFalse()
if i_checked:
alphabet_dir = "alphabet_fast/"
self.settings_editor.box_file_length.setValue(0.21)
else:
alphabet_dir = "alphabet_slow/"
self.settings_editor.box_file_length.setValue(0.4)
self.audio.setAlphabetDir(alphabet_dir)
self.reset()
##################################### Get functions
def isBusyCalibrating(self):
return (self.action_calibrate.isChecked()) and (not self.calibrated)
def getAudioOverlap(self):
val = 0.01*float(self.scrollbar_letter_speed.value())
val = float(str( "%.2f" % val))
return val
def getSettings(self):
settings = self.settings_editor.getSettings()
#Speed and Channel settings
settings['overlap'] = self.getAudioOverlap()
#Mode
settings['fast_mode'] = self.action_fast_mode.isChecked()
settings['tutorial'] = self.action_tutorial.isChecked()
settings['calibrate'] = self.action_calibrate.isChecked()
settings['practise'] = self.action_practise.isChecked()
settings['inc phrases'] = self.action_inc_phrases.isChecked()
#Click pdf
settings['click_pdf'] = copy.deepcopy(self.click_pdf)
return settings
def getSettingsDir(self):
disp_str = "Select output file"
files = "cPickle Files (*.cPickle)"
return (disp_str, self.settings_dir, files)
##################################### Actions
def actionClear(self):
self.sentence_display.clear()
self.startSoundFalse()
def actionSettings(self, i_checked):
self.startSoundFalse()
self.settings_editor.show()
def actionVolume(self, i_checked):
self.volume_editor.show()
def actionAlphabet(self, i_checked):
self.action_alphabet.setChecked(i_checked)
if i_checked:
self.showAlphabetSeq()
else:
self.hideAlphabetSeq()
def actionCloseAlphabet(self, i_visible):
if not i_visible:
self.action_alphabet.setChecked(False)
self.adjustSize()
def actionEditDictionary(self, i_trigger):
self.startSoundFalse()
dir = self.cur_dir + "dictionaries/"
filename = QtGui.QFileDialog.getOpenFileName( self, "Select dictionary", dir, "Text Files (*.txt)");
if len(filename) > 0:
self.ticker_core.setDict(filename)
self.initDisplayForNewWord()
def actionCloseApplication(self):
self.close()
################################################# Hide show functions
def hidePhrases(self):
self.phrases_hidden = True
self.phrase_disp.hide()
self.label_phrases.hide()
self.initSize()
def showPhrases(self):
self.phrases_hidden = False
self.phrase_disp.show()
self.label_phrases.show()
self.initSize()
def hideAlphabetSeq(self):
self.alphabet_hidden = True
self.alphabet_widget_display.hide()
self.initSize()
def showAlphabetSeq(self):
self.alphabet_hidden = False
self.alphabet_widget_display.show()
self.initSize()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
gui = TickerGui()
gui.show()
sys.exit( app.exec_())
| mit | -7,585,681,300,317,747,000 | 45.157738 | 155 | 0.611903 | false |
redhat-openstack/ceilometer | ceilometer/tests/unit/test_messaging.py | 10 | 2520 | # Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import fixture as fixture_config
import oslo_messaging.conffixture
from oslotest import base
from ceilometer import messaging
class MessagingTests(base.BaseTestCase):
def setUp(self):
super(MessagingTests, self).setUp()
self.CONF = self.useFixture(fixture_config.Config()).conf
self.useFixture(oslo_messaging.conffixture.ConfFixture(self.CONF))
def test_get_transport_invalid_url(self):
self.assertRaises(oslo_messaging.InvalidTransportURL,
messaging.get_transport, "notvalid!")
def test_get_transport_url_caching(self):
t1 = messaging.get_transport('fake://')
t2 = messaging.get_transport('fake://')
self.assertEqual(t1, t2)
def test_get_transport_default_url_caching(self):
t1 = messaging.get_transport()
t2 = messaging.get_transport()
self.assertEqual(t1, t2)
def test_get_transport_default_url_no_caching(self):
t1 = messaging.get_transport(cache=False)
t2 = messaging.get_transport(cache=False)
self.assertNotEqual(t1, t2)
def test_get_transport_url_no_caching(self):
t1 = messaging.get_transport('fake://', cache=False)
t2 = messaging.get_transport('fake://', cache=False)
self.assertNotEqual(t1, t2)
def test_get_transport_default_url_caching_mix(self):
t1 = messaging.get_transport()
t2 = messaging.get_transport(cache=False)
self.assertNotEqual(t1, t2)
def test_get_transport_url_caching_mix(self):
t1 = messaging.get_transport('fake://')
t2 = messaging.get_transport('fake://', cache=False)
self.assertNotEqual(t1, t2)
def test_get_transport_optional(self):
self.CONF.set_override('rpc_backend', '')
self.assertIsNone(messaging.get_transport(optional=True,
cache=False))
| apache-2.0 | 4,521,507,094,438,750,700 | 37.769231 | 75 | 0.676587 | false |
angr/angr | angr/storage/memory_mixins/size_resolution_mixin.py | 1 | 5667 | from typing import Optional
import logging
from . import MemoryMixin
from ...errors import SimMemoryLimitError, SimMemoryError, SimUnsatError
l = logging.getLogger(__name__)
class SizeNormalizationMixin(MemoryMixin):
"""
Provides basic services related to normalizing sizes. After this mixin, sizes will always be a plain int.
Assumes that the data is a BV.
- load will throw a TypeError if no size is provided
- store will default to len(data)//byte_width if no size is provided
"""
def load(self, addr, size=None, **kwargs):
if size is None:
raise TypeError("Must provide size to load")
elif type(size) is int:
out_size = size
elif getattr(size, 'op', None) == 'BVV':
out_size = size.args[0]
else:
raise Exception("Size must be concretely resolved by this point in the memory stack")
return super().load(addr, size=out_size, **kwargs)
def store(self, addr, data, size=None, **kwargs):
max_size = len(data) // self.state.arch.byte_width
if size is None:
out_size = max_size
elif type(size) is int:
out_size = size
elif getattr(size, 'op', None) == 'BVV':
out_size = size.args[0]
else:
raise Exception("Size must be concretely resolved by this point in the memory stack")
if out_size > max_size:
raise SimMemoryError("Not enough data for store")
if out_size == 0:
# skip zero-sized stores
return
super().store(addr, data, size=out_size, **kwargs)
class SizeConcretizationMixin(MemoryMixin):
"""
This mixin allows memory to process symbolic sizes. It will not touch any sizes which are not ASTs with non-BVV ops.
Assumes that the data is a BV.
- symbolic load sizes will be concretized as their maximum and a warning will be logged
- symbolic store sizes will be dispatched as several conditional stores with concrete sizes
"""
def __init__(self, concretize_symbolic_write_size: bool=False, max_concretize_count: Optional[int]=256,
max_symbolic_size: int=0x400000, raise_memory_limit_error: bool=False, size_limit: int=257, **kwargs):
super().__init__(**kwargs)
self._concretize_symbolic_write_size = concretize_symbolic_write_size # in place of the state option CONCRETIZE_SYMBOLIC_WRITE_SIZES
self._max_concretize_count = max_concretize_count
self._max_symbolic_size = max_symbolic_size
self._raise_memory_limit_error = raise_memory_limit_error
self._size_limit = size_limit
def copy(self, memo):
o = super().copy(memo)
o._concretize_symbolic_write_size = self._concretize_symbolic_write_size
o._max_concretize_count = self._max_concretize_count
o._max_symbolic_size = self._max_symbolic_size
o._raise_memory_limit_error = self._raise_memory_limit_error
o._size_limit = self._size_limit
return o
def load(self, addr, size=None, **kwargs):
if getattr(size, 'op', 'BVV') == 'BVV':
return super().load(addr, size=size, **kwargs)
l.warning("Loading symbolic size via max. be careful.")
out_size = self.state.solver.max(size)
return super().load(addr, size=out_size, **kwargs)
def store(self, addr, data, size=None, condition=None, **kwargs):
if getattr(size, 'op', 'BVV') == 'BVV':
super().store(addr, data, size=size, condition=condition, **kwargs)
return
max_size = len(data) // self.state.arch.byte_width
try:
if self._raise_memory_limit_error:
conc_sizes = list(self.state.solver.eval_upto(
size,
self._size_limit,
extra_constraints=(size <= max_size,)
)
)
if len(conc_sizes) == self._size_limit:
raise SimMemoryLimitError("Extremely unconstrained store size")
else:
conc_sizes = list(self.state.solver.eval_upto(
size,
self._max_concretize_count,
extra_constraints=(size <= max_size,)
)
)
except SimUnsatError:
# size has to be greater than max_size
raise SimMemoryError("Not enough data for store")
# filter out all concrete sizes that are greater than max_size
# Note that the VSA solver (used in static mode) cannot precisely handle extra constraints. As a result, we may
# get conc_sizes with values that violate the extra constraint (size <= max_size).
conc_sizes = [ cs for cs in conc_sizes if cs <= max_size ]
conc_sizes.sort()
if not conc_sizes:
raise SimMemoryError("Not enough data for store")
if self._max_concretize_count is not None:
conc_sizes = conc_sizes[:self._max_concretize_count]
if size.symbolic:
if any(cs > self._max_symbolic_size for cs in conc_sizes):
l.warning("At least one concretized size is over the limit of %d bytes. Constrain them to the limit.",
self._max_symbolic_size)
conc_sizes = [min(cs, self._max_symbolic_size) for cs in conc_sizes]
if condition is None:
condition = self.state.solver.true
for conc_size in conc_sizes:
if conc_size == 0:
continue
super().store(addr, data, size=conc_size, condition=condition & (size == conc_size), **kwargs)
| bsd-2-clause | 4,801,087,622,439,213,000 | 40.669118 | 141 | 0.602965 | false |
fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extract33Fdtlx44WordpressCom.py | 1 | 1146 | def extract33Fdtlx44WordpressCom(item):
'''
Parser for '33fdtlx44.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
if item['tags'] == ['danmei']:
titlemap = [
('[TSOPUF] Chapter ', 'The Sound of Piano Under Fascism', 'translated'),
('[OP] Chapter ', 'The Old Photo', 'translated'),
('Tensei Shoujo no Rirekisho', 'Tensei Shoujo no Rirekisho', 'translated'),
('Master of Dungeon', 'Master of Dungeon', 'oel'),
]
for titlecomponent, name, tl_type in titlemap:
if titlecomponent.lower() in item['title'].lower():
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
	return False
 | bsd-3-clause | 694,607,589,493,561,900 | 33.757576 | 105 | 0.611693 | false |
0daylabs/WebXploiter-Docs | conf.py | 4 | 9255 | # -*- coding: utf-8 -*-
#
# WebXploit documentation build configuration file, created by
# sphinx-quickstart on Sun Aug 2 12:35:07 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'WebXploit'
copyright = u'2015, Anirudh Anand'
author = u'Anirudh Anand'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1 beta'
# The full version, including alpha/beta/rc tags.
release = '0.1 beta'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'WebXploitdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'WebXploit.tex', u'WebXploit Documentation',
u'Anirudh Anand', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'webxploit', u'WebXploit Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'WebXploit', u'WebXploit Documentation',
author, 'WebXploit', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-3.0 | 5,449,078,688,407,971,000 | 31.247387 | 79 | 0.707401 | false |
ellkae/win-pkg-manager | wpm_db.py | 1 | 14196 | ########################################################################
# Group: Windows Package Manager #2
# Name: Joshua Stein
# Group Members: Sebastian Imlay and Timothy James Telan
# Date: October 3, 2011
# Class that takes care of all database queries
########################################################################
import sqlite3, logging, sys
class db:
# __init__
# Parameters: dbFileName and logFileName are strings
# Exception: sqlite3.Error if db error
# Sets up all the tables for Windows Package Manager
def __init__(self, dbFileName, logFileName):
# Initialize Logger
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S', filename=logFileName, filemode='a')
# Connect/Create Database
self.commit = True
self.conn = sqlite3.connect(dbFileName)
self.cursor = self.conn.cursor()
# Check existance of each table and create if necessary
tableCheck = "SELECT name FROM sqlite_master WHERE name=? AND type='table'"
# Tuples with name names and strings of sql queries for table construction
tableNames = [("Statistics",),("Application",),("RegExpr",),("Scripts",),("Dependencies",),("Files",),("OldFiles",)]
tableConstruct = ["CREATE TABLE Statistics(ID INTEGER PRIMARY KEY, NumUpdatesNeeded INTEGER, NumSuccUpdates INTEGER, Date TEXT, Timestamp TEXT)",
"CREATE TABLE Application(ID INTEGER PRIMARY KEY, ApplicationName TEXT, CurrentVersionNum TEXT, DownloadURL TEXT, MainURL TEXT, UninstallFirst BOOLEAN, NumOldVersionsToKeep INTEGER, Timestamp TEXT)",
"CREATE TABLE RegExpr(ApplicationID INTEGER REFERENCES Application(ID), Expression TEXT, IsVersion Integer)",
"CREATE TABLE Scripts(ID INTEGER PRIMARY KEY, ApplicationID INTEGER REFERENCES Application(ID), Script TEXT, IsPre INTEGER)",
"CREATE TABLE Dependencies(ID INTEGER PRIMARY KEY, ApplicationID INTEGER REFERENCES Application(ID), Dependency INTEGER REFERENCES Application(ID))",
"CREATE TABLE Files(ID INTEGER PRIMARY KEY, ApplicationID INTEGER REFERENCES Application(ID), CurrEXEFileName TEXT, LocalEXELocation TEXT, EXEType TEXT)",
"CREATE TABLE OldFiles(ID INTEGER PRIMARY KEY, ApplicationID INTEGER REFERENCES Application(ID), OldEXEFileName TEXT, OldVersionNum TEXT, OldCount INTEGER)"]
# Add every table defined above to the database
numTables = len(tableNames)
for ix in range(numTables):
self.cursor.execute(tableCheck, tableNames[ix])
if self.cursor.fetchone() == None:
try:
self.cursor.execute(tableConstruct[ix])
except sqlite3.Error as e:
logging.exception("Database error: " + str(e.args[0]))
raise
except:
logging.exception("Unexpected error: " + str(sys.exc_info()[0]))
raise
self.conn.commit()
logging.info("Table Created: " + tableNames[ix][0])
# __del__
# Parameters: None
# Close database connection and save any changes made to the database
def __del__(self):
if self.commit:
self.conn.commit()
self.close()
# close
# Parameters: None
# Close database connection and save any changes made to the database, if needed
def close(self):
if self.commit:
self.conn.commit()
self.cursor.close()
self.conn.close()
logging.info("Database: Changes saved and connection closed")
# change_commit
# Parameters: commitBool is a boolean
# Returns: None
# Changes whether insert/update/delete commit after execution
def change_commit(self, commitBool):
self.conn.commit()
self.commit = commitBool
# rollback
# Parameters: None
# Returns: None
# Returns to last commit and turns commit back to True
def rollback(self):
self.conn.rollback()
self.cursor = self.conn.cursor()
self.commit = True
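	# Example (hypothetical sketch): batching several writes into one commit
	#   database.change_commit(False)
	#   try:
	#       ...several database.insert(...) calls...
	#       database.change_commit(True)  # re-enables auto-commit and commits
	#   except sqlite3.Error:
	#       database.rollback()           # return to the last commit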
# retrieve
# Parameters: num is an integer, none indicates fetchall
# Returns: A list of query results
# Exception: TypeError if num is not an integer, sqlite3.Error if db error
	# Retrieves data from the database after a query
def retrieve(self, num=None):
if num == None:
data = self.cursor.fetchall()
logging.info("Database: Fetched all data from query")
return data
else:
try:
data = self.cursor.fetchmany(num)
logging.info("Database: Fetched " + str(num) + " entries from query")
return data
except sqlite3.Error as e:
logging.exception("Database error: " + str(e.args[0]))
raise
except TypeError:
logging.exception("db Class: TypeError in retrieve")
return None
# query
# Parameters: tableName and appName are strings
# selectField is a list (or tuple) of table column names
# dependencyNames is True when want names of all apps current appName depends on
# timeRange
# Returns: True on success, exception otherwise
# Exception: sqlite3.Error if db error
# Performs a database query
def query(self, tableName=None, appName=None, selectField=('*',), dependencyNames = False, timeRange=[]):
# Table and application must be defined
if tableName == None or (appName == None and tableName != "Application" and tableName != "Statistics"):
return False
# Build sql queries
sF_Len = len(selectField) - 1
qField = ["SELECT ", selectField[0]]
for ix in range(sF_Len):
qField.append(", ")
qField.append(selectField[ix + 1])
# Treat Applications don't need to use ID restrictions may not be desired
if tableName == "Application":
if appName != None:
qField.append(" FROM Application WHERE ApplicationName=?")
else:
qField.append(" FROM Application")
# When dependency table and dependencyNames, combine tables
elif tableName == "Dependencies" and dependencyNames:
qField.append(" From Application, Dependencies WHERE Application.ID == Dependencies.Dependency AND ApplicationID IN (SELECT ApplicationID FROM Application WHERE ApplicationName=?)")
elif tableName == "Statistics":
if len(timeRange) == 2:
qField.append( " FROM Statistics WHERE Timestamp BETWEEN ? and ?")
elif len(timeRange) == 0:
qField.append( " FROM Statistics")
else:
return False
else:
qField.append(" FROM ")
qField.append(tableName)
qField.append(" WHERE ApplicationID IN (SELECT ID FROM Application WHERE ApplicationName=?)")
qField = "".join(qField)
# Execute sql queries
try:
if appName == None and tableName != "Statistics":
self.cursor.execute(qField)
elif tableName != "Statistics":
self.cursor.execute(qField, (appName,))
else:
self.cursor.execute(qField, timeRange)
logging.info("Database Query: Application: " + str(appName) + " -- Table: " + tableName + " -- Fields: " + str(selectField))
return True
except sqlite3.Error as e:
logging.exception("Database error: " + str(e.args[0]))
raise
except:
logging.exception("Unexpected error: " + str(sys.exc_info()[0]))
raise
# insert
# Parameters: tableName is a string
# fields is a list (or tuple) of column names
# data is a list (or tuple) of data values in proper order w.r.t. fields
# if table != Application, appName goes where appID would be
# Exception: sqlite3.Error if db error
# Insert data into a table
def insert(self, tableName=None, fields=None, data=None):
# All fields must be defined
if tableName == None or fields == None or data == None or len(fields) != len(data):
return False
f_len = len(fields) - 1
d_len = f_len
# Build query
qField = ["INSERT INTO ", tableName, "(", fields[0]]
for ix in range(f_len):
qField.append(", ")
qField.append(fields[ix + 1])
qField.append(") VALUES(?")
for ix in range(d_len):
qField.append(", ?")
qField.append(")")
qField = "".join(qField)
# Execute the query
if tableName == 'Application':
try:
self.cursor.execute(qField, data)
if self.commit:
self.conn.commit()
logging.info("Database Insert: Table: " + tableName + " -- Fields: " + str(fields) + " -- Data: " + str(data))
return True
except sqlite3.Error as e:
logging.exception("Database error: " + str(e.args[0]))
raise
except:
logging.exception("Unexpected error: " + str(sys.exc_info()[0]))
raise
else:
# Find Position of ApplicationID for replacement
idPos = d_len
for ix in range(d_len):
if fields[ix] == "ApplicationID":
idPos = ix
break
# Get ApplicationID and then insert data into table
try:
qData = list(data)
if tableName != 'Statistics':
self.cursor.execute("SELECT ID FROM Application WHERE ApplicationName=?", (data[idPos],))
appID = self.cursor.fetchone()
qData[idPos] = appID[0]
self.cursor.execute(qField, qData)
if self.commit:
self.conn.commit()
logging.info("Database Insert: Table: " + tableName + " -- Application: " + data[idPos] if idPos < d_len else "None" + " -- Fields: " + str(fields) + " -- Data: " + str(qData))
return True
except sqlite3.Error as e:
logging.exception("Database error: " + str(e.args[0]))
raise
except:
logging.exception("Unexpected error: " + str(sys.exc_info()[0]))
raise
# delete
# Parameters: tableName is a string
# fields is a list (or tuple) of column names
# data is a list (or tuple) of data values in proper order w.r.t. fields
# if table != Application, appName goes where appID would be
# Exception: sqlite3.Error if db error
# Delete a row from a table
def delete(self, tableName=None, fields=None, data=None):
# All fields must be defined
if tableName == None or fields == None or data == None or len(fields) != len(data):
return False
f_len = len(fields)
d_len = f_len
# Build query
qField = ["DELETE FROM ", tableName, " WHERE ", fields[0], "=?"]
for ix in range(1,f_len):
qField.append(" and ")
qField.append(fields[ix])
qField.append("=?")
qField = "".join(qField)
# Execute the query
if tableName == 'Application':
try:
self.cursor.execute(qField, data)
if self.commit:
self.conn.commit()
logging.info("Database Delete: Table: " + tableName + " -- Fields: " + str(fields) + " -- Data: " + str(data))
return True
except sqlite3.Error as e:
logging.exception("Database error: " + str(e.args[0]))
raise
except:
logging.exception("Unexpected error: " + str(sys.exc_info()[0]))
raise
else:
# Find Position of ApplicationID for replacement
for ix in range(d_len):
if fields[ix] == "ApplicationID":
idPos = ix
break
# Get ApplicationID and then delete data from table
try:
self.cursor.execute("SELECT ID FROM Application WHERE ApplicationName=?", (data[idPos],))
appID = self.cursor.fetchone()
qData = list(data)
qData[idPos] = appID[0]
self.cursor.execute(qField, qData)
if self.commit:
self.conn.commit()
logging.info("Database Delete: Table: " + tableName + " -- Application: " + data[idPos] + " -- Fields: " + str(fields) + " -- Data: " + str(data))
return True
except sqlite3.Error as e:
logging.exception("Database error: " + str(e.args[0]))
raise
except:
logging.exception("Unexpected error: " + str(sys.exc_info()[0]))
raise
# update
# Parameters: tableName is a string
# setFields and colFields are lists (or tuples) of column names
# setToFields and colRestrict are lists (or tuples) of data values in proper order w.r.t. fields
# if table != Application, appName goes where appID would be
# Exception: sqlite3.Error if db error
# Delete a row from a table
def update(self, tableName=None, setFields=None, setToFields=None, colFields=None, colRestrict=None):
# No field may be empty
if tableName == None or setFields == None or setToFields == None or colFields == None or colRestrict == None:
return False
# Fields need to be of equal length
if len(setFields) != len(setToFields) or len(colFields) != len(colRestrict) or len(setFields) == 0 or len(colFields) == 0:
return False
s_len = len(setFields)
c_len = len(colRestrict)
# OldFile table needs to increment field, so replacement necessary
qField = ["UPDATE ", tableName, " SET ", setFields[0], "=OldCount + 1" if (setToFields[0] == "OldCount + 1") else "=?"]
for ix in range(1,s_len):
qField.append(",")
qField.append(setFields[ix])
if (setFields[ix] == "OldCount"):
qField.append("=OldCount + 1")
else:
qField.append("=?")
qField.append(" WHERE ")
qField.append(colFields[0])
qField.append("=?")
for ix in range(1,c_len):
qField.append(" and ")
qField.append(colFields[ix])
qField.append("=?")
qField = "".join(qField)
# Execute the query
if tableName == 'Application':
try:
qData = list(setToFields) + list(colRestrict)
self.cursor.execute(qField, qData)
if self.commit:
self.conn.commit()
logging.info("Database Update: Table: " + tableName + " -- Column Fields: " + str(colFields) + " -- Restrictions: " + str(colRestrict) + " -- Set Fields: " + str(setFields) + " -- Set To: " + str(setToFields))
return True
except sqlite3.Error as e:
logging.exception("Database error: " + str(e.args[0]))
raise
except:
logging.exception("Unexpected error: " + str(sys.exc_info()[0]))
raise
else:
# Find Position of ApplicationID for replacement
for ix in range(c_len):
if colFields[ix] == "ApplicationID":
idPos = ix
break
# Get ApplicationID and then delete data from table
try:
self.cursor.execute("SELECT ID FROM Application WHERE ApplicationName=?", (colRestrict[idPos],))
appID = self.cursor.fetchone()
qData = list(colRestrict)
qData[idPos] = appID[0]
qData = filter(lambda x: x != "OldCount + 1", list(setToFields)) + qData
self.cursor.execute(qField, qData)
if self.commit:
self.conn.commit()
logging.info("Database Update: Table: " + tableName + " -- Application: " + colRestrict[idPos] + " -- Column Fields: " + str(colFields) + " -- Restrictions: " + str(colRestrict) + " -- Set Fields: " + str(setFields) + " -- Set To: " + str(setToFields))
return True
except sqlite3.Error as e:
logging.exception("Database error: " + str(e.args[0]))
raise
except:
logging.exception("Unexpected error: " + str(sys.exc_info()[0]))
raise
| mit | -6,396,381,771,849,262,000 | 35.214286 | 257 | 0.669062 | false |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/mkldnn/test_layer_norm_bf16_mkldnn_op.py | 2 | 5371 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# from paddle.fluid.tests.unittests.test_layer_norm_op import *
from __future__ import print_function
import unittest
import numpy as np
from operator import mul
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle import enable_static
from functools import reduce
from paddle.fluid.tests.unittests.mkldnn.test_layer_norm_mkldnn_op import TestLayerNormMKLDNNOp
from paddle.fluid.tests.unittests.mkldnn.test_layer_norm_mkldnn_op import _reference_layer_norm_naive
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
from paddle.fluid.tests.unittests.op_test import _set_use_system_allocator
np.random.random(123)
_set_use_system_allocator(True)
@unittest.skipIf(not core.supports_bfloat16(),
"place does not support BF16 evaluation")
class TestLayerNormBF16MKLDNNOp(TestLayerNormMKLDNNOp):
def __assert_close(self, tensor, np_array, msg, rtol=2e-02, atol=2):
self.assertTrue(
np.allclose(
np.array(tensor), np_array, rtol=rtol, atol=atol), msg)
def check_forward(self,
shape,
begin_norm_axis,
with_scale_bias=True,
with_is_test=False):
# attr
epsilon = 0.00001
x_shape = shape
D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1)
scale_shape = [D]
np.random.seed(123)
x = np.random.random_sample(x_shape).astype(np.float32)
x_bf16 = convert_float_to_uint16(x)
if with_scale_bias:
scale = np.random.random_sample(scale_shape).astype(np.float32)
bias = np.random.random_sample(scale_shape).astype(np.float32)
else:
scale = np.array([])
bias = np.array([])
# reference forward & backward
y, mean, variance = _reference_layer_norm_naive(x, scale, bias, epsilon,
begin_norm_axis)
y_bf16 = convert_float_to_uint16(y)
var_dict = locals()
var_names = ['x_bf16', 'mean', 'variance', 'y_bf16']
if with_scale_bias:
var_names.append('scale')
var_names.append('bias')
ground_truth = {name: var_dict[name] for name in var_names}
program = fluid.Program()
with fluid.program_guard(program):
block = program.global_block()
# scale and bias are fp32 and other vars are of bf16
for name in ground_truth:
if name == 'x_bf16' or name == 'y_bf16':
block.create_var(
name=name,
dtype='uint16',
shape=ground_truth[name].shape)
else:
block.create_var(
name=name,
dtype='float32',
shape=ground_truth[name].shape)
inputs = {"X": block.var('x_bf16')}
if with_scale_bias:
inputs["Scale"] = block.var('scale')
inputs["Bias"] = block.var('bias')
block.append_op(
type="layer_norm",
inputs=inputs,
outputs={
"Y": block.var('y_bf16'),
"Mean": block.var('mean'), # share the same memory
"Variance": block.var('variance'), # share the same memory
},
attrs={
"epsilon": epsilon,
"begin_norm_axis": begin_norm_axis,
"use_mkldnn": True,
"is_test": with_is_test
})
exe = fluid.Executor(core.CPUPlace())
input_list = ['x_bf16']
if with_scale_bias:
input_list.append('scale')
input_list.append('bias')
out = exe.run(program,
feed={name: var_dict[name]
for name in input_list},
fetch_list=['y_bf16', 'mean', 'variance'])
self.__assert_close(y_bf16, out[0], "y_bf16", 2)
if not with_is_test:
self.__assert_close(mean, out[1], "mean")
self.__assert_close(variance, out[2], "variance", 1e-3)
def test_check_forward_with_is_test(self):
self.check_forward(
shape=[2, 3, 4, 5], begin_norm_axis=3, with_is_test=True)
# TODO (jczaja): Enable those to test when enabling training using bf16
def test_check_forward_with_scale_and_bias(self):
pass
def test_check_forward_without_scale_and_bias(self):
pass
if __name__ == "__main__":
enable_static()
unittest.main()
| apache-2.0 | 7,962,912,321,997,282,000 | 35.787671 | 101 | 0.559858 | false |
vladyslav2/django-shop | shop/models_bases/managers.py | 8 | 5878 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import AnonymousUser
from django.db import models, transaction
from django.db.models.aggregates import Count
from polymorphic.manager import PolymorphicManager
from shop.order_signals import processing
#==============================================================================
# Product
#==============================================================================
class ProductStatisticsManager(PolymorphicManager):
"""
A Manager for all the non-object manipulation needs, mostly statistics and
other "data-mining" toys.
"""
def top_selling_products(self, quantity):
"""
This method "mines" the previously passed orders, and gets a list of
products (of a size equal to the quantity parameter), ordered by how
many times they have been purchased.
"""
# Importing here is fugly, but it saves us from circular imports...
from shop.models.ordermodel import OrderItem
# Get an aggregate of product references and their respective counts
top_products_data = OrderItem.objects.values(
'product').annotate(
product_count=Count('product')
).order_by('product_count'
)[:quantity]
# The top_products_data result should be in the form:
# [{'product_reference': '<product_id>', 'product_count': <count>}, ..]
top_products_list = [] # The actual list of products
for values in top_products_data:
prod = values.get('product')
# We could eventually return the count easily here, if needed.
top_products_list.append(prod)
return top_products_list
class ProductManager(PolymorphicManager):
"""
A more classic manager for Product filtering and manipulation.
"""
def active(self):
return self.filter(active=True)
#==============================================================================
# Order
#==============================================================================
class OrderManager(models.Manager):
def get_latest_for_user(self, user):
"""
Returns the last Order (from a time perspective) a given user has
placed.
"""
if user and not isinstance(user, AnonymousUser):
return self.filter(user=user).order_by('-modified')[0]
else:
return None
def get_unconfirmed_for_cart(self, cart):
return self.filter(cart_pk=cart.pk, status__lt=self.model.CONFIRMED)
def remove_old_orders(self, cart):
"""
Removes all old unconfirmed order objects.
"""
old_orders = self.get_unconfirmed_for_cart(cart)
old_orders.delete()
def create_order_object(self, cart, request):
"""
Create an empty order object and fill it with the given cart data.
"""
order = self.model()
order.cart_pk = cart.pk
order.user = cart.user
order.status = self.model.PROCESSING # Processing
order.order_subtotal = cart.subtotal_price
order.order_total = cart.total_price
return order
@transaction.commit_on_success
def create_from_cart(self, cart, request):
"""
This creates a new Order object (and all the rest) from a passed Cart
object.
Specifically, it creates an Order with corresponding OrderItems and
eventually corresponding ExtraPriceFields
This will only actually commit the transaction once the function exits
to minimize useless database access.
The `state` parameter is further passed to process_cart_item,
process_cart, and post_process_cart, so it can be used as a way to
store per-request arbitrary information.
Emits the ``processing`` signal.
"""
# must be imported here!
from shop.models.ordermodel import (
ExtraOrderItemPriceField,
ExtraOrderPriceField,
OrderItem,
)
from shop.models.cartmodel import CartItem
# First, let's remove old orders
self.remove_old_orders(cart)
# Create an empty order object
order = self.create_order_object(cart, request)
order.save()
# Let's serialize all the extra price arguments in DB
for field in cart.extra_price_fields:
eoi = ExtraOrderPriceField()
eoi.order = order
eoi.label = unicode(field[0])
eoi.value = field[1]
if len(field) == 3:
eoi.data = field[2]
eoi.save()
# There, now move on to the order items.
cart_items = CartItem.objects.filter(cart=cart)
for item in cart_items:
item.update(request)
order_item = OrderItem()
order_item.order = order
order_item.product_reference = item.product.get_product_reference()
order_item.product_name = item.product.get_name()
order_item.product = item.product
order_item.unit_price = item.product.get_price()
order_item.quantity = item.quantity
order_item.line_total = item.line_total
order_item.line_subtotal = item.line_subtotal
order_item.save()
# For each order item, we save the extra_price_fields to DB
for field in item.extra_price_fields:
eoi = ExtraOrderItemPriceField()
eoi.order_item = order_item
# Force unicode, in case it has àö...
eoi.label = unicode(field[0])
eoi.value = field[1]
if len(field) == 3:
eoi.data = field[2]
eoi.save()
processing.send(self.model, order=order, cart=cart)
return order
| bsd-3-clause | -4,261,975,137,767,838,700 | 35.271605 | 79 | 0.576753 | false |
marrow/web.blueprint | setup.py | 1 | 3701 | #!/usr/bin/env python
# encoding: utf-8
from __future__ import print_function
import os
import sys
import codecs
try:
from setuptools.core import setup, find_packages
except ImportError:
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
if sys.version_info < (2, 7):
raise SystemExit("Python 2.7 or later is required.")
elif sys.version_info > (3, 0) and sys.version_info < (3, 3):
raise SystemExit("Python 3.3 or later is required.")
exec(open(os.path.join("web", "blueprint", "release.py")).read())
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
sys.exit(pytest.main(self.test_args))
here = os.path.abspath(os.path.dirname(__file__))
tests_require = [
'pytest', # test collector and extensible runner
'pytest-cov', # coverage reporting
'pytest-flakes', # syntax validation
'pytest-cagoule', # intelligent test execution
'pytest-spec', # output formatting
]
setup(
name = "WebCore",
version = version,
description = description,
long_description = codecs.open(os.path.join(here, 'README.rst'), 'r', 'utf8').read(),
url = url,
download_url = 'http://s.webcore.io/aIly',
author = author.name,
author_email = author.email,
license = 'MIT',
keywords = '',
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Software Development :: Libraries :: Python Modules",
],
packages = find_packages(exclude=['bench', 'docs', 'example', 'test']),
include_package_data = True,
namespace_packages = [
'web', # primary namespace
'web.blueprint', # quick-start templates
],
entry_points = {
'marrow.blueprint': [
# Core
'webcore.small = web.blueprint.small:SmallBlueprint', # small collection of files
'webcore.medium = web.blueprint.medium:MediumBlueprint', # lightly structured application
'webcore.large = web.blueprint.large:LargeBlueprint', # full MVC separation and structure
# Contentment
# single deployable application, fully dynamic; development and small sites
'contentment.solo = web.blueprint.contentment.solo:SoloBlueprint',
# local write, static read application; blogs and GitHub Pages
'contentment.static = web.blueprint.contentment.static:StaticBlueprint',
# dynamic write, static read application; high-availability, large-scale applications
'contentment.hybrid = web.blueprint.contentment.hybrid:HybridBlueprint',
],
},
install_requires = [
'web.template<3.0.0', # extensible template engine support
'marrow.package<2.0', # dynamic execution and plugin management
'pyyaml', # rich data interchange format; used for configuration
],
extras_require = dict(
development = tests_require,
),
tests_require = tests_require,
dependency_links = [],
zip_safe = True,
cmdclass = dict(
test = PyTest,
)
)
| mit | 8,587,443,514,297,042,000 | 28.141732 | 95 | 0.68468 | false |
rlutes/volttron-applications | pnnl/deprecated/AFDDAgent/afdd/Archive/agent_mt.py | 5 | 11161 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
# Copyright (c) 2015, Battelle Memorial Institute
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation
# are those of the authors and should not be interpreted as representing
# official policies, either expressed or implied, of the FreeBSD
# Project.
#
# This material was prepared as an account of work sponsored by an
# agency of the United States Government. Neither the United States
# Government nor the United States Department of Energy, nor Battelle,
# nor any of their employees, nor any jurisdiction or organization that
# has cooperated in the development of these materials, makes any
# warranty, express or implied, or assumes any legal liability or
# responsibility for the accuracy, completeness, or usefulness or any
# information, apparatus, product, software, or process disclosed, or
# represents that its use would not infringe privately owned rights.
#
# Reference herein to any specific commercial product, process, or
# service by trade name, trademark, manufacturer, or otherwise does not
# necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors
# expressed herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY
# operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
#}}}
'''This example agent shows how to use an agent with long-running tasks
executing in separate threads. It is not as nice as the greenlet version,
but it works. However, more care is needed to ensure the agent's publish
and subscribe sockets are not used in any of the secondary threads.
'''
import contextlib
import logging
import sys
import threading
import time
import zmq
from zmq.utils import jsonapi
from volttron.platform.agent import BaseAgent, PublishMixin, periodic
from volttron.platform.agent import multithreading, utils, matching
from volttron.platform import messaging
from volttron.platform.messaging import topics
#from volttron.platform.messaging import headers as headers_mod
import settings
_log = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)
def afdd(agent, sock):
#Data from Voltron
_log.debug("Rob: AFDD2 is running...")
data = agent.get_new_data()
return_temp = data["ReturnAirTemperature"]
outdoor_temp = data["OutsideAirTemperature"]
mixed_temp = data["MixedAirTemperature"]
# Main Algorithm
if ((mixed_temp < outdoor_temp and mixed_temp < return_temp) or
(mixed_temp > outdoor_temp and mixed_temp > return_temp)):
if not agent.set_point(sock, 'Damper', 0, settings.sync_trial_time):
_log.debug("Lock not Received from controller")
return 29
time.sleep(settings.afdd2_seconds_to_steady_state)
data = agent.get_new_data()
delta = abs(data["MixedAirTemperature"] -
data["ReturnAirTemperature"])
if delta < settings.afdd2_temperature_sensor_threshold:
_log.debug("Outdoor-air temperature sensor problem")
return 21
if not agent.set_point(sock, 'Damper', 100, settings.sync_trial_time):
_log.debug("Lock not Received from controller")
return 29
time.sleep(settings.afdd2_seconds_to_steady_state)
data = agent.get_new_data()
delta = abs(data["MixedAirTemperature"] -
data["OutsideAirTemperature"])
if delta < settings.afdd2_temperature_sensor_threshold:
_log.debug("Return-air temperature sensor problem")
return 22
#If it comes here => both tests fail
_log.debug("Mixed-air temperature sensor problem")
return 23
_log.debug("No Temperature Sensor faults detected")
return 20
def AFDDAgent(config_path, **kwargs):
publish_address = kwargs['publish_address']
config = utils.load_config(config_path)
agent_id = config['agentid']
rtu_path = dict((key, config[key])
for key in ['campus', 'building', 'unit'])
class Agent(PublishMixin, BaseAgent):
def __init__(self, **kwargs):
super(Agent, self).__init__(**kwargs)
self.lock_acquired = False
self.thread = None
self.data_queue = multithreading.WaitQueue()
self.value_queue = multithreading.WaitQueue()
def setup(self):
super(Agent, self).setup()
headers = {
'Content-Type': 'text/plain',
'requesterID': agent_id,
}
self.publish(topics.ACTUATOR_LOCK_ACQUIRE(**rtu_path), headers)
def start(self, algo=None):
if algo is None:
algo = afdd
def run():
sock = messaging.Socket(zmq.PUSH)
sock.connect(publish_address)
with contextlib.closing(sock):
algo(self, sock)
self.thread = threading.Thread(target=run)
self.thread.daemon = True
self.thread.start()
@matching.match_exact(topics.ACTUATOR_LOCK_RESULT(**rtu_path))
def on_lock_result(self, topic, headers, message, match):
msg = jsonapi.loads(message[0])
holding_lock = self.lock_acquired
if headers['requesterID'] == agent_id:
self.lock_acquired = msg == 'SUCCESS'
elif msg == 'SUCCESS':
self.lock_acquired = False
if self.lock_acquired and not holding_lock:
self.start()
@matching.match_exact(topics.DEVICES_VALUE(point='all', **rtu_path))
def on_new_data(self, topic, headers, message, match):
data = jsonapi.loads(message[0])
self.data_queue.notify_all(data)
@matching.match_glob(topics.ACTUATOR_VALUE(point='*', **rtu_path))
def on_set_result(self, topic, headers, message, match):
self.value_queue.notify_all((match.group(1), True))
@matching.match_glob(topics.ACTUATOR_ERROR(point='*', **rtu_path))
def on_set_error(self, topic, headers, message, match):
self.value_queue.notify_all((match.group(1), False))
def get_new_data(self, timeout=None):
_log.debug('get_new_data({})'.format(timeout))
return self.data_queue.wait(timeout)
def set_point(self, sock, point_name, value, timeout=None):
_log.debug('set_point({}, {}, {})'.format(point_name, value, timeout))
headers = {
'Content-Type': 'text/plain',
'requesterID': agent_id,
}
with self.value_queue.condition:
sock.send_message(
topics.ACTUATOR_SET(point=point_name, **rtu_path),
headers, str(value), flags=zmq.NOBLOCK)
try:
return self.value_queue._wait(timeout)
except multithreading.Timeout:
return None
Agent.__name__ = 'AFDDAgent'
return Agent(**kwargs)
def main(argv=sys.argv):
'''Main method called by the eggsecutable.'''
utils.default_main(AFDDAgent,
description='VOLTTRON platform™ AFDD agent',
argv=argv)
def test():
from volttron.platform.agent import periodic
def TestAgent(config_path, **kwargs):
config = utils.load_config(config_path)
agent_id = config['agentid']
rtu_path = dict((key, config[key])
for key in ['campus', 'building', 'unit'])
class Agent(PublishMixin, BaseAgent):
def setup(self):
super(Agent, self).setup()
self.damper = 0
@matching.match_regex(topics.ACTUATOR_LOCK_ACQUIRE() + '(/.*)')
def on_lock_result(self, topic, headers, message, match):
_log.debug("Topic: {topic}, {headers}, Message: {message}".format(
topic=topic, headers=headers, message=message))
self.publish(topics.ACTUATOR_LOCK_RESULT() + match.group(0),
headers, jsonapi.dumps('SUCCESS'))
@matching.match_regex(topics.ACTUATOR_SET() + '(/.*/([^/]+))')
def on_new_data(self, topic, headers, message, match):
_log.debug("Topic: {topic}, {headers}, Message: {message}".format(
topic=topic, headers=headers, message=message))
if match.group(2) == 'Damper':
self.damper = int(message[0])
self.publish(topics.ACTUATOR_VALUE() + match.group(0),
headers, message[0])
@periodic(5)
def send_data(self):
data = {
'ReturnAirTemperature': 55,
'OutsideAirTemperature': 50,
'MixedAirTemperature': 45,
'Damper': self.damper
}
self.publish_ex(topics.DEVICES_VALUE(point='all', **rtu_path),
{}, ('application/json', jsonapi.dumps(data)))
Agent.__name__ = 'TestAgent'
return Agent(**kwargs)
settings.afdd2_seconds_to_steady_state = 3
settings.sync_trial_time = 10
t = threading.Thread(target=utils.default_main, args=(TestAgent, 'test'))
t.daemon = True
t.start()
time.sleep(2)
main()
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| bsd-3-clause | 7,253,712,923,699,842,000 | 38.996416 | 82 | 0.629537 | false |
alexgleith/Quantum-GIS | cmake/FindQsci.py | 3 | 2183 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2012, Larry Shaffer <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Larry Shaffer <[email protected]> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Larry Shaffer <[email protected]> ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Larry Shaffer <[email protected]> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""Find QScintilla2 PyQt4 module version.
.. note:: Redistribution and use is allowed according to the terms of the BSD
license. For details see the accompanying COPYING-CMAKE-SCRIPTS file.
"""
__author__ = 'Larry Shaffer ([email protected])'
__date__ = '22/10/2012'
__copyright__ = 'Copyright 2012, The QGIS Project'
try:
from PyQt4.Qsci import QSCINTILLA_VERSION_STR
VER = QSCINTILLA_VERSION_STR
except ImportError, e:
VER = ""
print("qsci_version_str:%s" % VER)
| gpl-2.0 | -3,858,368,333,479,964,700 | 47.511111 | 90 | 0.731104 | false |
sanguinariojoe/FreeCAD | src/Mod/Fem/femobjects/result_mechanical.py | 12 | 9806 | # ***************************************************************************
# * Copyright (c) 2016 Qingfeng Xia <qingfeng.xia()eng.ox.ac.uk> *
# * Copyright (c) 2016 Bernd Hahnebach <[email protected]> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD FEM result mechanical document object"
__author__ = "Qingfeng Xia, Bernd Hahnebach"
__url__ = "https://www.freecadweb.org"
## @package result_mechanical
# \ingroup FEM
# \brief mechanical result object
from . import base_fempythonobject
class ResultMechanical(base_fempythonobject.BaseFemPythonObject):
"""
The Fem::ResultMechanical's Proxy python type, add result specific properties
"""
Type = "Fem::ResultMechanical"
def __init__(self, obj):
super(ResultMechanical, self).__init__(obj)
obj.addProperty(
"App::PropertyString",
"ResultType",
"Base",
"Type of the result",
1 # the 1 set the property to ReadOnly
)
obj.ResultType = str(self.Type)
# for frequency analysis
obj.addProperty(
"App::PropertyInteger",
"Eigenmode",
"Data",
"",
True
)
obj.addProperty(
"App::PropertyFloat",
"EigenmodeFrequency",
"Data",
"User Defined Results",
True
)
# node results
# set read only or hide a property:
# https://forum.freecadweb.org/viewtopic.php?f=18&t=13460&start=10#p108072
# do not show up in propertyEditor of comboView
obj.addProperty(
"App::PropertyVectorList",
"DisplacementVectors",
"NodeData",
"List of displacement vectors",
True
)
obj.addProperty(
"App::PropertyFloatList",
"Peeq",
"NodeData",
"List of equivalent plastic strain values",
True
)
obj.addProperty(
"App::PropertyFloatList",
"MohrCoulomb",
"NodeData",
"List of Mohr Coulomb stress values",
True
)
obj.addProperty(
"App::PropertyFloatList",
"ReinforcementRatio_x",
"NodeData",
"Reinforcement ratio x-direction",
True
)
obj.addProperty(
"App::PropertyFloatList",
"ReinforcementRatio_y",
"NodeData",
"Reinforcement ratio y-direction",
True
)
obj.addProperty(
"App::PropertyFloatList",
"ReinforcementRatio_z",
"NodeData",
"Reinforcement ratio z-direction",
True
)
# these three principal vectors are used only if there is a reinforced mat obj
# https://forum.freecadweb.org/viewtopic.php?f=18&t=33106&p=416006#p416006
obj.addProperty(
"App::PropertyVectorList",
"PS1Vector",
"NodeData",
"List of 1st Principal Stress Vectors",
True
)
obj.addProperty(
"App::PropertyVectorList",
"PS2Vector",
"NodeData",
"List of 2nd Principal Stress Vectors",
True
)
obj.addProperty(
"App::PropertyVectorList",
"PS3Vector",
"NodeData",
"List of 3rd Principal Stress Vectors",
True
)
# readonly in propertyEditor of comboView
obj.addProperty(
"App::PropertyFloatList",
"DisplacementLengths",
"NodeData",
"List of displacement lengths",
True
)
obj.addProperty(
"App::PropertyFloatList",
"vonMises",
"NodeData",
"List of von Mises equivalent stresses",
True
)
obj.addProperty(
"App::PropertyFloatList",
"PrincipalMax",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"PrincipalMed",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"PrincipalMin",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"MaxShear",
"NodeData",
"List of Maximum Shear stress values",
True
)
obj.addProperty(
"App::PropertyFloatList",
"MassFlowRate",
"NodeData",
"List of mass flow rate values",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NetworkPressure",
"NodeData",
"List of network pressure values",
True
)
obj.addProperty(
"App::PropertyFloatList",
"UserDefined",
"NodeData",
"User Defined Results",
True
)
obj.addProperty(
"App::PropertyFloatList",
"Temperature",
"NodeData",
"Temperature field",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NodeStressXX",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NodeStressYY",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NodeStressZZ",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NodeStressXY",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NodeStressXZ",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NodeStressYZ",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NodeStrainXX",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NodeStrainYY",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NodeStrainZZ",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NodeStrainXY", "NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NodeStrainXZ",
"NodeData",
"",
True
)
obj.addProperty(
"App::PropertyFloatList",
"NodeStrainYZ",
"NodeData",
"",
True
)
# initialize the Stats with the appropriate count of items
# see fill_femresult_stats in femresult/resulttools.py
zero_list = 26 * [0]
obj.Stats = zero_list
def onDocumentRestored(self, obj):
# migrate old result objects, because property "StressValues"
# was renamed to "vonMises" in commit 8b68ab7
if hasattr(obj, "StressValues") is True:
obj.addProperty(
"App::PropertyFloatList",
"vonMises",
"NodeData",
"List of von Mises equivalent stresses",
True
)
obj.vonMises = obj.StressValues
obj.removeProperty("StressValues")
# migrate old result objects, because property "Stats"
# consisting of min, avg, max values was reduced to min, max in commit c2a57b3e
if len(obj.Stats) == 39:
temp = obj.Stats
for i in range(12, -1, -1):
del temp[3 * i + 1]
obj.Stats = temp
| lgpl-2.1 | -7,060,192,863,758,567,000 | 29.548287 | 87 | 0.456557 | false |
danforthcenter/plantcv | plantcv/plantcv/visualize/auto_threshold_methods.py | 2 | 3946 | # Compare auto threshold methods for a grayscale image
import os
import cv2
import numpy as np
from plantcv.plantcv import params
from plantcv.plantcv.transform import resize_factor
from plantcv.plantcv import plot_image
from plantcv.plantcv import print_image
from plantcv.plantcv import fatal_error
from plantcv.plantcv.threshold import mean
from plantcv.plantcv.threshold import otsu
from plantcv.plantcv.threshold import gaussian
from plantcv.plantcv.threshold import triangle
def auto_threshold_methods(gray_img, grid_img=True, object_type="light"):
""" Compare auto threshold methods for a grayscale image
Inputs:
gray_img = Grayscale image data
grid_img = Whether or not to compile masks into a single plot
object_type = "light" or "dark" (default: "light")
- If object is lighter than the background then standard thresholding is done
- If object is darker than the background then inverse thresholding is done
Returns:
labeled_imgs = List of labeled plotting images
:param gray_img: numpy.ndarray
:param grid_img: bool
:param object_type: str
:return labeled_imgs: list
"""
# Check that the image is grayscale
if not len(np.shape(gray_img)) == 2:
fatal_error("Input image is not grayscale!")
# Store and disable debug mode
debug = params.debug
params.debug = None
# Initialize threshold method names, mask list, final images
method_names = ["Gaussian", "Mean", "Otsu", "Triangle"]
all_methods = []
labeled_imgs = []
# Set starting location for labeling different masks
y = int(np.shape(gray_img)[0] / 8)
x = int(np.shape(gray_img)[1] / 8)
# Create mask imgs from each thresholding method
all_methods.append(gaussian(gray_img=gray_img, max_value=255, object_type=object_type))
all_methods.append(mean(gray_img=gray_img, max_value=255, object_type=object_type))
all_methods.append(otsu(gray_img=gray_img, max_value=255, object_type=object_type))
all_methods.append(triangle(gray_img=gray_img, max_value=255, object_type=object_type, xstep=1))
# Plot labels of each colorspace on the corresponding img
for i, method in enumerate(all_methods):
converted_img = cv2.cvtColor(method, cv2.COLOR_GRAY2RGB)
labeled = cv2.putText(img=converted_img, text=method_names[i], org=(x, y),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=params.text_size, color=(255, 0, 255), thickness=params.text_thickness)
# Reset debug mode
params.debug = debug
if params.debug == "print":
# If debug is print, save the image to a file
print_image(labeled, os.path.join(params.debug_outdir, str(params.device) + "_" +
method_names[i] + "_vis_thresholds.png"))
elif params.debug == "plot":
# If debug is plot, print to the plotting device
plot_image(labeled)
labeled_imgs.append(labeled)
if grid_img:
# Store and disable debug mode
debug = params.debug
params.debug = None
# Compile images together into one
top_row = np.hstack([labeled_imgs[0], labeled_imgs[1]])
bot_row = np.hstack([labeled_imgs[2], labeled_imgs[3]])
plotting_img = np.vstack([top_row, bot_row])
labeled_imgs.append(plotting_img)
plotting_img = resize_factor(plotting_img, factors=(0.5, 0.5))
# Reset debug mode
params.debug = debug
if params.debug == "print":
# If debug is print, save the image to a file
print_image(plotting_img, os.path.join(params.debug_outdir, str(params.device) + "_vis_all_thresholds.png"))
elif params.debug == "plot":
# If debug is plot, print to the plotting device
plot_image(plotting_img)
return labeled_imgs
| mit | -6,447,388,510,589,562,000 | 40.536842 | 120 | 0.65256 | false |
tstenner/bleachbit | bleachbit/RecognizeCleanerML.py | 1 | 5998 | # vim: ts=4:sw=4:expandtab
# BleachBit
# Copyright (C) 2008-2020 Andrew Ziem
# https://www.bleachbit.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Check local CleanerML files as a security measure
"""
from bleachbit import _, _p
import bleachbit
from bleachbit.CleanerML import list_cleanerml_files
from bleachbit.Options import options
import hashlib
import logging
import os
import sys
logger = logging.getLogger(__name__)
KNOWN = 1
CHANGED = 2
NEW = 3
def cleaner_change_dialog(changes, parent):
"""Present a dialog regarding the change of cleaner definitions"""
def toggled(cell, path, model):
"""Callback for clicking the checkbox"""
__iter = model.get_iter_from_string(path)
value = not model.get_value(__iter, 0)
model.set(__iter, 0, value)
# TODO: move to GuiBasic
from bleachbit.GuiBasic import Gtk
from gi.repository import GObject
dialog = Gtk.Dialog(title=_("Security warning"),
transient_for=parent,
modal=True, destroy_with_parent=True)
dialog.set_default_size(600, 500)
# create warning
warnbox = Gtk.Box()
image = Gtk.Image()
image.set_from_icon_name("dialog-warning", Gtk.IconSize.DIALOG)
warnbox.pack_start(image, False, True, 0)
# TRANSLATORS: Cleaner definitions are XML data files that define
# which files will be cleaned.
label = Gtk.Label(
label=_("These cleaner definitions are new or have changed. Malicious definitions can damage your system. If you do not trust these changes, delete the files or quit."))
label.set_line_wrap(True)
warnbox.pack_start(label, True, True, 0)
dialog.vbox.pack_start(warnbox, False, True, 0)
# create tree view
liststore = Gtk.ListStore(GObject.TYPE_BOOLEAN, GObject.TYPE_STRING)
treeview = Gtk.TreeView(model=liststore)
renderer0 = Gtk.CellRendererToggle()
renderer0.set_property('activatable', True)
renderer0.connect('toggled', toggled, liststore)
# TRANSLATORS: This is the column label (header) in the tree view for the
# security dialog
treeview.append_column(
Gtk.TreeViewColumn(_p('column_label', 'Delete'), renderer0, active=0))
renderer1 = Gtk.CellRendererText()
# TRANSLATORS: This is the column label (header) in the tree view for the
# security dialog
treeview.append_column(
Gtk.TreeViewColumn(_p('column_label', 'Filename'), renderer1, text=1))
# populate tree view
for change in changes:
liststore.append([False, change[0]])
# populate dialog with widgets
scrolled_window = Gtk.ScrolledWindow()
scrolled_window.add(treeview)
dialog.vbox.pack_start(scrolled_window, True, True, 0)
dialog.add_button(Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT)
dialog.add_button(Gtk.STOCK_QUIT, Gtk.ResponseType.CLOSE)
# run dialog
dialog.show_all()
while True:
if Gtk.ResponseType.ACCEPT != dialog.run():
sys.exit(0)
delete = []
for row in liststore:
b = row[0]
path = row[1]
if b:
delete.append(path)
if 0 == len(delete):
# no files selected to delete
break
from . import GuiBasic
if not GuiBasic.delete_confirmation_dialog(parent, mention_preview=False):
# confirmation not accepted, so do not delete files
continue
for path in delete:
logger.info("deleting unrecognized CleanerML '%s'", path)
os.remove(path)
break
dialog.destroy()
def hashdigest(string):
"""Return hex digest of hash for a string"""
# hashlib requires Python 2.5
if isinstance(string, str):
string = string.encode()
return hashlib.sha512(string).hexdigest()
class RecognizeCleanerML:
"""Check local CleanerML files as a security measure"""
def __init__(self, parent_window=None):
self.parent_window = parent_window
try:
self.salt = options.get('hashsalt')
except bleachbit.NoOptionError:
self.salt = hashdigest(os.urandom(512))
options.set('hashsalt', self.salt)
self.__scan()
def __recognized(self, pathname):
"""Is pathname recognized?"""
with open(pathname) as f:
body = f.read()
new_hash = hashdigest(self.salt + body)
try:
known_hash = options.get_hashpath(pathname)
except bleachbit.NoOptionError:
return NEW, new_hash
if new_hash == known_hash:
return KNOWN, new_hash
return CHANGED, new_hash
def __scan(self):
"""Look for files and act accordingly"""
changes = []
for pathname in sorted(list_cleanerml_files(local_only=True)):
pathname = os.path.abspath(pathname)
(status, myhash) = self.__recognized(pathname)
if NEW == status or CHANGED == status:
changes.append([pathname, status, myhash])
if len(changes) > 0:
cleaner_change_dialog(changes, self.parent_window)
for change in changes:
pathname = change[0]
myhash = change[2]
logger.info("remembering CleanerML file '%s'", pathname)
if os.path.exists(pathname):
options.set_hashpath(pathname, myhash)
| gpl-3.0 | 905,560,385,984,819,800 | 32.50838 | 177 | 0.642714 | false |
henriquebastos/googlegroupexporter | googlegroupexporter/exporters/mbox.py | 1 | 1241 | from mailbox import mbox, Message
from googlegroupexporter.exporters import Exporter
class MailExporter(Exporter):
def __init__(self, *args, **kwargs):
super(MailExporter, self).__init__(*args, **kwargs)
self.mbox = None
self.summary = {}
def before_export(self, group_name):
output = group_name + '.mbox'
self.mbox = mbox(output)
self.summary = dict(indexes=0, topics=0, messages=0)
def __str__(self):
return '{indexes} index pages listing {topics} topics with {messages} messages.'.format(**self.summary)
def after_export(self):
self.mbox.close()
def process_index(self, page):
self.summary['indexes'] += 1
def process_topic(self, page):
self.summary['topics'] += 1
def process_message(self, page):
id_ = fid, tid, mid = page.id
headers = {
'GGE-Url': page.url,
'GGE-Id': '{}/{}/{}'.format(*id_),
'GGE-Forum-Id': fid,
'GGE-Topic-Id': tid,
'GGE-Message-Id': mid,
}
msg = Message(str(page))
for k, v in headers.items():
msg.add_header(k, v)
self.mbox.add(msg)
self.summary['messages'] += 1
| mit | -6,006,324,279,972,019,000 | 24.854167 | 111 | 0.551974 | false |
PluribusNetworks/pluribus_neutron | neutron-plugin-pluribus/setup.py | 1 | 1360 | #!/usr/bin/env python
# COPYRIGHT 2014 Pluribus Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import setuptools
setuptools.setup(
author='Pluribus Networks',
author_email='[email protected]',
description='OpenStack Neutron Pluribus plugin',
license='Apache License, Version 2.0',
long_description=open("README.rst").read(),
name='neutron-plugin-pluribus',
entry_points={
'console_scripts': []},
packages=setuptools.find_packages(
exclude=['*.tests','*.tests.*','tests.*','tests']),
url='http://www.pluribusnetworks.com',
version='3.0',
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
zip_safe=False,
)
| apache-2.0 | -4,210,123,063,918,219,300 | 33.871795 | 75 | 0.681618 | false |
mmgrant73/zeitcoin | zeitcoinamp.py | 1 | 73609 | #!/usr/bin/env python
#########################################
# Zeitcoin AMP Class
#########################################
import sys, os, time, threading, hashlib, random
from zeitcoindb import hashtable
from zeitcoinutility import utility,encyption
from zeitcointrans import transactions
from twisted.protocols import amp
from twisted.protocols.amp import AMP
from twisted.web import server
from twisted.application import service, internet
from twisted.internet import reactor, defer, endpoints, task, threads
from twisted.internet.defer import inlineCallbacks, Deferred
from twisted.internet.endpoints import TCP4ClientEndpoint, connectProtocol
from twisted.internet.protocol import Factory
from twisted.internet.threads import deferToThread
ALIVE=1
FNAME='zeitcoin'
ADDRESS='127.0.0.1'
PORT=1234
GUID='1'
TXCOINHASH=''
TXADDRESS=''
TXMESSAGE=''
class Getuuid(amp.Command):
# Get a unique id for a peer (guid)
arguments = [('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Copyht(amp.Command):
    # Copy a peer's hash table and give it to a newly joined peer
arguments = [('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Getnumtb(amp.Command):
# Get the number of transaction in the transaction block
arguments = [('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.Integer())]
class Updatetb(amp.Command):
    # Tells the server to update its transaction block with the given transaction hash
arguments = [('thash', amp.String()),('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Encryptdata(amp.Command):
    # Tells the peer to encrypt the data sent to it
arguments = [('guid', amp.String()),('address', amp.String()), ('port', amp.Integer()),('data', amp.String())]
response = [('reply', amp.String())]
class Getnewaddress(amp.Command):
# Tells the server to create a new account and send back its address
arguments = [('guid', amp.String()),('address', amp.String()), ('port', amp.Integer()),('account', amp.String())]
response = [('reply', amp.String())]
class Getleader(amp.Command):
# Tells the server to randomly select a leader by generating a guid and find the nearest peer
arguments = [('clientaddress', amp.String()), ('clientport', amp.Integer()), ('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
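# Illustrative sketch of the leader-selection idea described on Getleader:
# draw a random guid and pick the nearest known peer to it. The sha1-of-random
# guid generation and the XOR nearness test are assumptions standing in for
# utility.generateguid and the hashtable lookup used by the real responder.
def example_pick_leader(known_guids):
    target = hashlib.sha1(str(random.random())).hexdigest()
    if not known_guids:
        return None
    # nearest peer = smallest XOR distance to the randomly drawn guid
    return min(known_guids, key=lambda g: int(g, 16) ^ int(target, 16))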
class Setleader(amp.Command):
# Tells the server to be the leader
arguments = [('clientaddress', amp.String()), ('clientport', amp.Integer()), ('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.Boolean())]
class Sendtransaction(amp.Command):
    # Tells the server to send the coin identified by coinhash to the given receiver address
arguments = [('guid', amp.String()),('address', amp.String()), ('port', amp.Integer()), ('coinhash', amp.String()),('receiveraddress', amp.String()), ('message', amp.Integer())]
response = [('reply', amp.Boolean())]
class Gettransaction(amp.Command):
    # Tells the server to retrieve the pending transaction
arguments = [('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.Boolean())]
class Boardcasttrans(amp.Command):
    # Tells the server to broadcast a transaction (txid, hash, timestamp) to its peers
arguments = [('txid', amp.Integer()), ('thash', amp.String()), ('ts', amp.Float()),('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Generatecoin(amp.Command):
# Tells the server to generate a new coin
arguments = [('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Acceptcoin(amp.Command):
# Tells the server to accept the newly generated coin
arguments = [('txid', amp.Integer()), ('thash', amp.String()), ('ts', amp.Float()),('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Ping(amp.Command):
    # Pings the peers in a given peer's hash table to see which nodes are
    # still alive and removes any that aren't from the hash table.
    # Should be run once an hour or so.
    arguments = [('guid', amp.String()), ('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
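# Illustrative sketch, not part of the original protocol: the comment on Ping
# says it should run roughly hourly, and twisted's task.LoopingCall (already
# imported above) can drive that. 'proto' is assumed to be a connected AMP
# instance; the 3600-second interval is an assumption, not a zeitcoin rule.
def example_schedule_pings(proto, guid, address, port, interval=3600):
    # Fire one Ping command over the existing connection each interval.
    def ping_once():
        d = proto.callRemote(Ping, guid=guid, address=address, port=port)
        d.addCallback(lambda box: box['reply'])
        return d
    loop = task.LoopingCall(ping_once)
    loop.start(interval, now=True)  # run immediately, then every interval
    return loop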
class Donetourguide(amp.Command):
    # Tells a tour guide that the client is done with it so the guide can
    # release any tour state kept for that peer
    arguments = [('guid', amp.String()), ('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Getpuzzle(amp.Command):
    # Tells the server to generate a new guided-tour puzzle from h0, ts, L and arraytg
arguments = [('guid', amp.String()),('h0', amp.String()),('ts', amp.Float()),('L', amp.Integer()),('arraytg', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Verifypuzzle(amp.Command):
# Tells the server to verify a puzzle for a guided tour
arguments = [('guid', amp.String()),('h0', amp.String()),('hl', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.Boolean())]
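# Illustrative sketch, assuming a plain sha1 hash chain: a guided-tour puzzle
# walks the client from h0 through L hash hops to a final value hl. The real
# tour also mixes in per-tour-guide data (see Getpuzzle and Sendtotg), so this
# simplified check only shows the shape of the verification, not the actual
# rule implemented by the guidedtour class.
def example_chain_check(h0, hl, L):
    h = h0
    for _ in range(L):
        h = hashlib.sha1(h).hexdigest()  # one simulated tour hop
    return h == hl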
class Leaderinfo(amp.Command):
    # Sends the server the current leader's address, port and guid
arguments = [('address', amp.String()), ('port', amp.Integer()), ('guid', amp.String())]
response = [('reply', amp.Boolean())]
class Initleader(amp.Command):
    # Tells the server to initialize itself as the leader with the given message
arguments = [('message', amp.String()),('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Getclosest(amp.Command):
# Tells the server to return the closest guid in its hash table
arguments = [('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Getclosestpeer(amp.Command):
# Tells the server to iteratively search the network for the peer closest to the given guid
arguments = [('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Sendpublickey(amp.Command):
# Tells the server to send its public key
arguments = [('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Join(amp.Command):
# Tells the server that a new peer has joined the network
arguments = [('guid', amp.String()), ('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Leave(amp.Command):
# Tells the server that a peer has left the network
arguments = [('guid', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
class Sendtotg(amp.Command):
# Tells the server acting as a tour guide to extend/verify the hash sent to it by the client
# (the second guid field was a duplicate key; renamed to guid1 to match the responder)
arguments = [('hashvalue', amp.String()), ('stopnumber', amp.Integer()), ('length', amp.Integer()), ('guid', amp.String()), ('ts', amp.Float()),('guid1', amp.String()),('address', amp.String()), ('port', amp.Integer())]
response = [('reply', amp.String())]
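# For reference, the guided-tour hash chain these commands carry
# (a sketch matching makepuzzle/tourguideverify below; ks/kjs are the
# leader's and tour guides' public keys):
#   h0 = sha256(Ax + ":" + str(L) + ":" + str(ts) + ":" + ks)
#   hl = sha256(h(l-1) + ":" + str(l) + ":" + str(L) + ":" + Ax + ":" + str(ts) + ":" + kjs)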
##########################################################################
# TODO: move the protocol classes (Zeit, clientcommands) to another file #
##########################################################################
class Zeit(amp.AMP):
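def __init__(self):
# amp.AMP instances are created by the Factory with no arguments, so
# the identity fields the responders rely on are assumed to come from
# the module globals set at startup (a sketch; adjust if these are
# injected elsewhere).
amp.AMP.__init__(self)
self.filename = FNAME
self.address = ADDRESS
self.port = PORT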
def getuuid(self,guid,address,port):
# tested and works
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+' received a getuuid message from ',sender
ut=utility(self.filename,self.address,self.port)
guid=ut.generateguid()
ut.updatepeer(guid,address,port)
return {'reply': guid}
Getuuid.responder(getuuid)
def ping(self,guid,address,port):
# tested and works
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+' received a ping message from ',sender
str1="hello: address - "+str(address)+" port - "+str(port)
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': str1}
Ping.responder(ping)
def donetourguide(self,guid,address,port):
# tested and works
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+' received a donetourguide message from ',sender
self.donewithtg(guid,address,port)
str1="ok"
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': str1}
Donetourguide.responder(donetourguide)
def getpuzzle(self,guid,h0,ts,L,arraytg,address,port):
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+' received a getpuzzle message from ',sender
gt=guidedtour(self.filename,self.address,self.port)
gt.getpuzzle(guid,h0,ts,L,arraytg)
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': 'ok'}
Getpuzzle.responder(getpuzzle)
def verifypuzzle(self,guid,h0,hl,address,port):
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+' received a verifypuzzle message from ',sender
gt=guidedtour(self.filename,self.address,self.port)
ht=hashtable()
dbpool=ht.tconnectdb(FNAME)
d = defer.Deferred()
d.addCallback(ht.tnumtb)
d.addCallback(ht.tgetkeys)
d.addCallback(self._verifypuzzle,guid,h0,hl,gt,ht,dbpool)
# tdeleteleader is assumed to pass the verification result through
d.addCallback(ht.tdeleteleader,dbpool)
d.addCallback(self.__verifypuzzle,ht,dbpool)
d.callback(dbpool)
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return d
Verifypuzzle.responder(verifypuzzle)
def _verifypuzzle(self,keylist,guid,h0,hl,gt,ht,dbpool):
print "got to _verifypuzzle"
print "message to verify h0-"+h0+" hl-"+hl+" from client-"+guid
# L and ts are assumed to have been stored in the leader table when the
# puzzle was issued; retrieving them here is still a TODO (see the
# notes in guidedtour.verifypuzzle below)
N=len(keylist)
for i in range(0,L):
tourindex=gt.gettourindex(h0,N)
# NOTE: the stored shared key for this stop (keylist[tourindex])
# should arguably feed the hash; tourguideverify currently reuses
# this node's own public key
h0=gt.tourguideverify(h0,i+1,L,guid,ts)
if (h0==hl):
result=True
#store the transaction and boardcast it
else:
result=False
return result
def __verifypuzzle(self,result,ht,dbpool):
print "got to __verifypuzzle"
ht.tclosedb(dbpool)
return {'reply': result}
def getnewaddress(self,guid,address,port,account):
# Need to fix database (dbpool)
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a getnewaddress message from ",sender
ut=utility(self.filename,self.address,self.port)
ht=hashtable()
en=encyption()
time1=ut.gettimestamp()
newaddress=ut.generateguid()
pubkey,privkey=en.generatekeys()
dbpool=ht.tconnectdb(FNAME)
d = defer.Deferred()
d.addCallback(ht.taddaccount,account,privkey,pubkey,newaddress,time1)
d.addCallback(self._getnewaddress,guid,newaddress,address,port,ht,dbpool)
d.callback(dbpool)
return d #{'reply': newaddress}
Getnewaddress.responder(getnewaddress)
def _getnewaddress(self,dummy,guid,newaddress,address,port,ht,dbpool):
print "got to _getnewaddress"
print "newaddress - ",newaddress
ht.tclosedb(dbpool)
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': newaddress}
def getnumtb(self,guid,address,port):
# Need to fix database (dbpool)
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a getnumtb message from ",sender
ht=hashtable()
dbpool=ht.tconnectdb(FNAME)
d = defer.Deferred()
d.addCallback(ht.tnumtb)
d.addCallback(self._getnumtb,guid,address,port,ht,dbpool)
d.callback(dbpool)
return d #{'reply': count1}
Getnumtb.responder(getnumtb)
def _getnumtb(self,count1,guid,address,port,ht,dbpool):
print "got to _getnumtb"
ht.tclosedb(dbpool)
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': count1}
def updatetb(self,thash,guid,address,port):
# Need to fix database (dbpool)
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a updatetb message from ",sender
ht=hashtable()
dbpool=ht.tconnectdb(FNAME)
d = defer.Deferred()
d.addCallback(ht.tgetalltb)
d.addCallback(self._updatetb,thash,guid,address,port,ht,dbpool)
d.callback(dbpool)
return d #{'reply': str(newtrans[1])}
Updatetb.responder(updatetb)
def _updatetb(self,listtb,thash,guid,address,port,ht,dbpool):
print "got to _updatetb"
trans=listtb[0]
thash1=trans[1]
if (thash==thash1):
newtrans=listtb[1]
else:
# the requester is behind: resend the first stored transaction
newtrans=listtb[0]
ht.tclosedb(dbpool)
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': str(newtrans[1])}
def getleader(self,clientaddress,clientport,guid,address,port):
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a getleader message from ",sender
# resolves to the deferredGenerator getleader defined later in this class
result=self.getleader(clientaddress,clientport,guid)
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': 'ok'}
Getleader.responder(getleader)
def boardcasttrans(self,txid,thash,ts,guid,address,port):
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a boardcasttrans message from ",sender
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
result=self.boardcasttrans(txid,thash,ts)
return {'reply': 'ok'}
Boardcasttrans.responder(boardcasttrans)
def generatecoin(self,guid,address,port):
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a generatecoin message from ",sender
#tx=transactions()
self.generatenewcoin()
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': 'ok'}
Generatecoin.responder(generatecoin)
def encryptdata(self,guid,address,port,data):
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received an encryptdata message from ",sender
en=encyption()
pubkey=en.getpublickey(FNAME)
message=en.encyptmessage(data,pubkey)
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': str(message)}
Encryptdata.responder(encryptdata)
def acceptcoin(self,txid,thash,ts,guid,address,port):
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received an acceptcoin message from ",sender
tx=transactions()
tx.acceptcoin(txid,thash,ts)
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': 'ok'}
Acceptcoin.responder(acceptcoin)
def setleader(self,clientaddress,clientport,guid,address,port):
global Leaderflag,Clientaddress,Clientport,Clientguid,ADDRESS,PORT
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a setleader message from ",sender
if (Leaderflag==1):
result=False
else:
# store client address,port and guid
self.setleader(clientaddress,clientport,guid)
Leaderflag=1
result=True
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': result}
Setleader.responder(setleader)
def sendtransaction(self,guid,address,port,coinhash,receiveraddress,message):
global ADDRESS,PORT,FNAME,TXCOINHASH,TXADDRESS,TXMESSAGE
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a sendtransaction message from ",sender
# store this information to be sent after guide tour
TXCOINHASH=coinhash
TXADDRESS=receiveraddress
TXMESSAGE=message
result=True
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': result}
Sendtransaction.responder(sendtransaction)
def gettransaction(self,guid,address,port):
global ADDRESS,PORT,FNAME,TXCOINHASH,TXADDRESS,TXMESSAGE
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a gettransaction message from ",sender
# return the transaction details stored earlier by sendtransaction
result=str(TXCOINHASH)+":"+str(TXADDRESS)+":"+str(TXMESSAGE)
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': result}
Gettransaction.responder(gettransaction)
def leaderinfo(self,address,port,guid):
global Leaderaddress, Leaderport, Leaderguid,ADDRESS,PORT
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
Leaderaddress=address
Leaderport=port
Leaderguid=guid
print frm+" received a leaderinfo message from ",sender
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': 'ok'}
Leaderinfo.responder(leaderinfo)
def initleader(self,message,guid,address,port):
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a initleader message from ",sender
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': 'ok'}
Initleader.responder(initleader)
def sendpublickey(self,guid,address,port):
# tested and works
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a sendpublickey message from ",sender
en = encyption()
pubkey=en.getpublickey(FNAME)
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': str(pubkey)}
Sendpublickey.responder(sendpublickey)
def join(self,guid,address,port):
# new peer join command
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+' received a join message from ',sender
ut=utility(self.filename,self.address,self.port)
guid=ut.generateguid()
result=ut.getht()
result+=guid
return {'reply': result}
Join.responder(join)
def leave(self,guid,address,port):
# peer leave command
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+' received a leave message from ',sender
self.leave(guid)
return {'reply': 'ok'}
Leave.responder(leave)
def sendtotg(self,hashvalue,stopnumber,length,guid,ts,guid1,address,port):
# h_{l} = hash(h_{l-1}\; ||\; l\; ||\; L\; ||\; A_{x}\; ||\; ts\; ||\; k_{js})
# ks - pulled from public key
# ts and guid will be stored by the tour guide and verified when sendtotg is called
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
gt=guidedtour(self.filename,self.address,self.port)
print frm+" received a sendtotg message from ",sender
print "tour guide received h0-"+hashvalue+" stopnumber-"+str(stopnumber)+" L-"+str(length)+" guid-"+guid+" ts-"+str(ts)
# the client uses the reply as the next hash in the chain, so the
# extended hash (not a bare "true") has to be returned here
result=gt.tourguideverify(hashvalue,stopnumber,length,guid,ts)
ut=utility(self.filename,self.address,self.port)
ut.updatepeer(guid,address,port)
return {'reply': result}
Sendtotg.responder(sendtotg)
def copyht(self,guid,address,port):
# tested and works
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a copyht message from ",sender
#ut=utility(self.filename,self.address,self.port)
#result=ut.getht()
ht=hashtable()
dbpool=ht.tconnectdb(FNAME)
d = defer.Deferred()
d.addCallback(ht.tgetallht)
d.addCallback(self._copyht,ht,dbpool)
d.callback(dbpool)
return d #{'reply': result}
Copyht.responder(copyht)
def _copyht(self,htlist,ht,dbpool):
print "got to _copyht"
result=''
for row in htlist:
result+=str(row[0])+","+str(row[1])+","+str(row[2])+","+str(row[3])+","+str(row[4])+":"
res=result[:-1]
ht.tclosedb(dbpool)
return {'reply': res}
def getclosest(self,guid,address,port):
# Need to fix database (dbpool)
global ADDRESS,PORT,FNAME
frm=str(self.address)+":"+str(self.port)
sender=str(address)+":"+str(port)
print frm+" received a getclosest message from ",sender
ht=hashtable()
dbpool=ht.tconnectdb(FNAME)
d = defer.Deferred()
d.addCallback(ht.tgetallguidht)
d.addCallback(self._getclosest,guid,ht,dbpool)
d.addCallback(ht.tgetaddress,dbpool)
d.addCallback(self.__getclosest,ht,dbpool)
d.callback(dbpool)
return d #{'reply': result}
Getclosest.responder(getclosest)
def _getclosest(self,peerlist,guid,ht,dbpool):
print "got to _getclosest"
ut=utility(self.filename,self.address,self.port)
guid1,d1=ut.closestpeer(guid, peerlist)
print "guid1 ",guid1
print "d1 ",d1
# remember the winner so __getclosest can build the full reply once
# the address lookup completes
self.closestguid=guid1
self.closestdistance=d1
return guid1,d1
def __getclosest(self,data1,ht,dbpool):
print "got to __getclosest"
ht.tclosedb(dbpool)
# reply as guid:address:port:distance, the format _getclosestpeer
# expects (assumes ht.tgetaddress resolved to an (address,port) pair)
result=str(self.closestguid)+":"+str(data1[0])+":"+str(data1[1])+":"+str(self.closestdistance)
return {'reply': result}
def getclosestpeer(self,guid,address,port):
global CLOSESTGUID,CLOSESTADDRESS,CLOSESTPORT,CLOSESTDISTANCE
d = defer.Deferred()
d.addCallback(self._getclosestpeer)
d.addCallback(self.__getclosestpeer)
d.callback(guid)
return d
Getclosestpeer.responder(getclosestpeer)
def __getclosestpeer(self,dummy):
# builds the reply from the globals set by _getclosestpeer
global CLOSESTGUID,CLOSESTADDRESS,CLOSESTPORT,CLOSESTDISTANCE
result= str(CLOSESTGUID)+":"+str(CLOSESTADDRESS)+":"+str(CLOSESTPORT)+":"+str(CLOSESTDISTANCE)
return {'reply': result}
@defer.deferredGenerator
def _getclosestpeer(self,guid):
global FNAME,CLOSESTGUID,CLOSESTADDRESS,CLOSESTPORT,CLOSESTDISTANCE
flag=0
d1=999999999999999999999999999999999999999999
cc=clientcommands(self.filename,self.address,self.port)
ht=hashtable()
ut=utility(self.filename,self.address,self.port)
conn,c=ht.connectdb(FNAME)
peerlist=ht.getallguidht(c)
guid1,d2=ut.closestpeer(guid, peerlist)
address,port=ht.getaddress(c,guid1)
ht.closedb(conn)
if (d2==0):
flag=1
d1=d2
while (flag==0):
wfd = defer.waitForDeferred(cc.dogetclosest(address,port,guid))
yield wfd
data = str(wfd.getResult())
datalist=data.split(":")
guid1=datalist[0]
address=datalist[1]
port=datalist[2]
d2=int(datalist[3])  # distances travel as strings over AMP
if (d2==0):
flag=1
else:
if (d2<d1):
# this peer knows someone closer; keep walking
d1=d2
address1=address
port1=port
guid2=guid1
else:
# distance stopped shrinking: the previous hop wins
address=address1
port=port1
guid1=guid2
flag=1
CLOSESTADDRESS=address
CLOSESTPORT=port
CLOSESTGUID=guid1
CLOSESTDISTANCE=d1
#return guid1,address,port
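# Note: _getclosestpeer above is an iterative, Kademlia-style lookup:
# ask the locally closest peer for *its* closest peer to the target and
# repeat until the reported distance stops shrinking.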
@defer.deferredGenerator
def leave(self,guid):
global FNAME
ht=hashtable()
dbpool=ht.tconnectdb(FNAME)
wfd = defer.waitForDeferred(ht.tdeleteht(dbpool,guid))
yield wfd
ht.tclosedb(dbpool)
print "Guid - "+guid+" is leaving the network"
return
def boardcasttrans(self,txid,thash,ts):
global FNAME
cc=clientcommands(self.filename,self.address,self.port)
ht=hashtable()
ut=utility(self.filename,self.address,self.port)
conn,c=ht.connectdb(FNAME)
res=ht.checktrans(conn,c,txid)
if (res==False):
guid=ht.getguid(c)
ht.addtb(conn,c,txid,thash,ts)
peerlist=ht.getallguidht(c)
guid1,d1=ut.closestpeer(guid,peerlist)
guid2,d2=ut.farestpeer(guid,peerlist)
address,port=ht.getaddress(c,guid1)
address1,port1=ht.getaddress(c,guid2)
print "closest peer ",guid1,address,port
print "farest peer ",guid2,address1,port1
cc.doboardcasttrans(address,port,txid,thash,ts)
cc.doboardcasttrans(address1,port1,txid,thash,ts)
else:
print "already exist in the transaction block"
return
@defer.deferredGenerator
def getleader(self,address,port,guid):
global FNAME
cc=clientcommands(self.filename,self.address,self.port)
ut=utility(self.filename,self.address,self.port)
randomguid=ut.generateguid()
ht=hashtable()
dbpool=ht.tconnectdb(FNAME)
#conn,c=ht.connectdb(FNAME)
wfd = defer.waitForDeferred(ht.tgetallguidht(dbpool))
yield wfd
peerlist = wfd.getResult()
#peerlist=ht.getallguidht(c)
guid1,d2=ut.closestpeer(randomguid, peerlist)
wfd = defer.waitForDeferred(ht.tgetaddress(dbpool,guid1,d2))
yield wfd
data1 = wfd.getResult()
address1=str(data1[0])
port1=int(data1[1])
#address1,port1=ht.getaddress(c,guid1)
ht.tclosedb(dbpool)
# check if the random is already a leader or a tour guide
#res=self.setleader(address1,port)
wfd = defer.waitForDeferred(cc.dosetleader(str(address1),int(port1),str(address),int(port),str(guid1)))
yield wfd
res = wfd.getResult()
print "res=",res
wfd = defer.waitForDeferred(cc.doleaderinfo(str(address),int(port),str(guid1),str(address1),int(port1)))
yield wfd
res = wfd.getResult()
print "result-",res
print "The leader will have guid - "+guid1+" address - "+address1+" port - "+str(port1)
#wfd = defer.waitForDeferred(cc.doinitleader(str(address1),int(port1)))
#yield wfd
#res = wfd.getResult()
#print "result-",res
return
@defer.deferredGenerator
def setleader(self,address,port,guid):
global Leaderflag,Clientaddress,Clientport,Clientguid,FNAME
gt=guidedtour(self.filename,self.address,self.port)
Clientaddress=address
Clientport=port
Clientguid=guid
#gtlist=gt.gettourguides()
gtlist=[]
N=random.randint(gt.MINTG,gt.MAXTG)
ht=hashtable()
dbpool=ht.tconnectdb(FNAME)
wfd = defer.waitForDeferred(ht.tgetallguidht(dbpool))
yield wfd
peerlist = wfd.getResult()
wfd = defer.waitForDeferred(ht.tnumht(dbpool))
yield wfd
count1 = wfd.getResult()
print "peerlist=",peerlist
for i in range(1,N):
j = random.randint(0,count1-1)
print "chosen tour guide index=",j
gtlist.append(peerlist[j])
gt.getsharedsecret(gtlist)
return
@defer.deferredGenerator
def donewithtg(self,guid,address,port):
global Leaderflag,Clientaddress,Clientport,Clientguid,FNAME,TXCOINHASH,TXADDRESS,TXMESSAGE, Leaderaddress,Leaderport
cc=clientcommands(self.filename,self.address,self.port)
wfd = defer.waitForDeferred(cc.dogettransaction(str(address),int(port)))
yield wfd
res = str(wfd.getResult())
listres=res.split(":")
TXCOINHASH = str(listres[0])
TXADDRESS = str(listres[1])
TXMESSAGE = str(listres[2])
wfd = defer.waitForDeferred(cc.dosendtransaction(str(Leaderaddress),int(Leaderport),str(TXCOINHASH),str(TXADDRESS),int(TXMESSAGE)))
yield wfd
res = str(wfd.getResult())
wfd = defer.waitForDeferred(cc.doinitleader(str(Leaderaddress),int(Leaderport)))
yield wfd
res = wfd.getResult()
print "result-",res
return
@defer.deferredGenerator
def generateht(self):
# Once a hash table has been copied, the new peer walks it to build its own hash table
global FNAME
ht=hashtable()
cc=clientcommands(self.filename,self.address,self.port)
dbpool=ht.tconnectdb(FNAME)
wfd = defer.waitForDeferred(ht.tgetallguidht(dbpool))
yield wfd
guidlist = wfd.getResult()
for guid in guidlist:
wfd = defer.waitForDeferred(ht.tgetaddress(dbpool,guid))
yield wfd
address1,port1 = wfd.getResult()
wfd = defer.waitForDeferred(cc.dogetclosest(address1,port1,guid))
yield wfd
# reply format: guid:address:port:distance (see __getclosest)
datalist = str(wfd.getResult()).split(":")
guid1=datalist[0]
# check whether that guid already exists; if so do nothing
wfd = defer.waitForDeferred(ht.tcheckguid(dbpool,guid1))
yield wfd
res = wfd.getResult()
# else replace the old entry with guid1
if (res==False):
wfd = defer.waitForDeferred(ht.tdeleteht(dbpool,guid1))
yield wfd
time1=time.time()
flag=0
wfd = defer.waitForDeferred(ht.taddht(dbpool,guid1,datalist[1],int(datalist[2]),flag,time1))
yield wfd
ht.tclosedb(dbpool)
return
@defer.deferredGenerator
def sendcoin(self,guid,address,port,receiveraddress,coinhash,message):
ut=utility(self.filename,self.address,self.port)
cc=clientcommands(self.filename,self.address,self.port)
tx=transactions()
time1=ut.gettimestamp()
wfd = defer.waitForDeferred(cc.doencryptdata(address,port,str(coinhash)))
yield wfd
signmessage1 = wfd.getResult()
senderscript = str(coinhash)+" "+str(signmessage1)+" "
receiverscript = " decode = verify"
transaction=tx.formattransaction(coinhash,senderscript,receiverscript,receiveraddress,message)
txid=hashlib.sha1(transaction).hexdigest()
cc.doacceptcoin(address,port,txid,transaction,time1)
return
@defer.deferredGenerator
def generatenewcoin(self): #doacceptcoin(self,address,port,txid1,thash1,ts1)
# will generate a new coin and give it to a random client
global FNAME
previoushash="00000000000000000000000000000000"
ut=utility(self.filename,self.address,self.port)
en= encyption()
ht=hashtable()
cc=clientcommands()
gt=guidedtour()
tx=transactions()
time1=ut.gettimestamp()
dbpool=ht.tconnectdb(FNAME)
randaddress=ut.generateguid()
coin=ut.generateguid()
privkey=en.getprivatekey(FNAME)
pubkey=en.getpublickey(FNAME)
signmessage=en.signmessage(coin,privkey)
# publickey = receiver public key
wfd = defer.waitForDeferred(ht.tgetallguidht(dbpool))
yield wfd
peerlist = wfd.getResult()
guid,d2=ut.closestpeer(randaddress, peerlist)
wfd = defer.waitForDeferred(ht.tgetaddress(dbpool,guid,d2))
yield wfd
data1 = wfd.getResult()
address=str(data1[0])
port=int(data1[1])
ht.tclosedb(dbpool)
wfd = defer.waitForDeferred(cc.dogetnewaddress(address,port,"newcoin"))
yield wfd
receiveraddress = str(wfd.getResult())
print "receiveraddress=",receiveraddress
wfd = defer.waitForDeferred(cc.doencryptdata(address,port,str(coin)))
yield wfd
signmessage1 = wfd.getResult()
#signmessage1=en.encyptmessage(coin,publickey)
senderscript = str(coin)+" "+str(signmessage1)+" "
receiverscript = " decode = verify"
message="test"
transaction=tx.formattransaction(previoushash,senderscript,receiverscript,receiveraddress,message)
txid=hashlib.sha1(transaction).hexdigest()
print "new coinhash - ",coin
print "transaction - ",transaction
print "txid - ",txid
print "sending it to guid-"+guid+" at address-"+address+" port-"+str(port)
cc.doacceptcoin(address,port,txid,transaction,time1)
previousthash,lensender,senderscript,receiveraddress,lenreceiver,receiverscript,lenmessage,message = tx.decodetransaction(transaction)
print "decoding transaction......."
print "previoushash = ",previousthash
print "lensender = ",lensender
print "senderrscript = ",senderscript
print "receiveraddress = ",receiveraddress
print "lenreceiver = ", lenreceiver
print "receiverscript = ",receiverscript
print "lenmessage = ",lenmessage
print "message = ",message
# send this to the randaddress
return
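# Transaction layout, inferred from decodetransaction's unpacking above
# (the actual packing lives in transactions.formattransaction, which is
# defined elsewhere): previoushash | len(senderscript) | senderscript |
# receiveraddress | len(receiverscript) | receiverscript |
# len(message) | message; txid is the sha1 hex digest of the packed blob.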
@defer.deferredGenerator
def checkblocktrans(self,txid):
# check the network for updated transactions
global FNAME,highestnumtb,updateaddress,updateport
highestnumtb=0
ht=hashtable()
cc=clientcommands(self.filename,self.address,self.port)
dbpool=ht.tconnectdb(FNAME)
wfd = defer.waitForDeferred(ht.tgetallguidht(dbpool))
yield wfd
peerlist = wfd.getResult()
for peer in peerlist:
wfd = defer.waitForDeferred(ht.tgetaddress(dbpool,peer))
yield wfd
address1,port1 = wfd.getResult()
wfd = defer.waitForDeferred(cc.dogetnumtb(address1,port1))
yield wfd
data = int(wfd.getResult())
if (data>highestnumtb):
highestnumtb=data
updateaddress=address1
updateport=port1
ht.tclosedb(dbpool)
# request updates from the peer with the highest transaction count
self.updateblocktrans(txid,updateaddress,updateport,highestnumtb)
return
@defer.deferredGenerator
def updateblocktrans(self,txid,address,port,num):
# update transaction block with new txid
global FNAME,highestnumtb,updateaddress,updateport
cc=clientcommands(self.filename,self.address,self.port)
ht=hashtable()
dbpool=ht.tconnectdb(FNAME)
wfd = defer.waitForDeferred(ht.tnumtb(dbpool))
yield wfd
numtb = int(wfd.getResult())
count1=0
while (numtb<highestnumtb and count1<50):
# loop until caught up (50 requests max, as a safety valve);
# txid is passed as the latest hash this node holds
wfd = defer.waitForDeferred(cc.doupdatetb(address,port,txid))
yield wfd
count1+=1
wfd = defer.waitForDeferred(ht.tnumtb(dbpool))
yield wfd
numtb = int(wfd.getResult())
ht.tclosedb(dbpool)
return
class clientcommands:
def __init__(self,filename,address,port):
self.filename = filename
self.address = address
self.port = port
def doping(self,address1,port1,guid):
global GUID
dest=str(address1)+":"+str(port1)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address1, port1)
pingDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a ping message to ",dest
return ampProto.callRemote(Ping, guid=GUID,address=self.address, port=self.port )
pingDeferred.addCallback(connected)
def handleFailure(f):
# would this be the best way? will it block if it times out?
print "errback"
print "we got an exception: %s" % (f.getTraceback(),)
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the ping message"
ut.messagefail(message, guid)
ut.updateflag(guid)
return False
pingDeferred.addErrback(handleFailure)
def pinged(result):
print frm+" has received a ping reply from ", dest
print "result = ",result['reply']
return result['reply']
pingDeferred.addCallback(pinged)
return pingDeferred
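# All the do* helpers below follow the same shape as doping above:
# connect with a TCP4ClientEndpoint, fire callRemote from a callback,
# trap failures in an errback that flags the unreachable peer, and
# unwrap result['reply'] in a final callback.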
def dodonetourguide(self,address1,port1):
global GUID
dest=str(address1)+":"+str(port1)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address1, port1)
donetourguideDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a donetourguide message to ",dest
return ampProto.callRemote(Donetourguide, guid=GUID,address=self.address, port=self.port )
donetourguideDeferred.addCallback(connected)
def handleFailure(f):
print "errback"
print "we got an exception: %s" % (f.getTraceback(),)
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the ping message"
ut.messagefail(message, guid)
ut.updateflag(guid)
return False
donetourguideDeferred.addErrback(handleFailure)
def donetourguideed(result):
print frm+" has received a donetourguide reply from ", dest
print "result = ",result['reply']
return result['reply']
donetourguideDeferred.addCallback(donetourguideed)
return donetourguideDeferred
def dogetguid(self,address,port):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
getguidDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a getguid message to ",dest
return ampProto.callRemote(Getuuid,guid=GUID,address=self.address,port=self.port)
getguidDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the getguid message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
return False
getguidDeferred.addErrback(handleFailure)
def getguided(result):
print frm+" has received a getguid reply from ", dest
print "result = ", result['reply']
return result['reply']
getguidDeferred.addCallback(getguided)
return getguidDeferred
def docopyht(self,address,port):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
copyhtDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a copyht message to ",dest
return ampProto.callRemote(Copyht,guid=GUID,address=self.address,port=self.port)
copyhtDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the copyht message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
return False
copyhtDeferred.addErrback(handleFailure)
def copyhted(result):
print frm+" has received a copyht reply from ", dest
print "result = ", result['reply']
self.putdata(result['reply'])
return result['reply']
copyhtDeferred.addCallback(copyhted)
return copyhtDeferred
def dogetclosest(self,address,port,guid1):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
getclosestDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a getclosest message to ",dest
return ampProto.callRemote(Getclosest,guid=guid1,address=self.address,port=self.port)
getclosestDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the getclosest message"
ut.messagefail(message, guid1)
ut.updateflag(guid1)
return False
getclosestDeferred.addErrback(handleFailure)
def getclosested(result):
print frm+" has received a getclosest reply from", dest
print "result = ", result['reply']
return result['reply']
getclosestDeferred.addCallback(getclosested)
return getclosestDeferred
def dogetclosestpeer(self,address,port,guid1):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
getclosestpeerDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a getclosestpeer message to ",dest
# assumed intent: query the iterative Getclosestpeer command rather
# than the one-hop Getclosest used by dogetclosest above
return ampProto.callRemote(Getclosestpeer,guid=guid1,address=self.address,port=self.port)
getclosestpeerDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the getclosestpeer message"
ut.messagefail(message, guid1)
ut.updateflag(guid1)
return False
getclosestpeerDeferred.addErrback(handleFailure)
def getclosestpeered(result):
print frm+" has received a getclosestpeer reply from ",dest
print "result = ",result['reply']
return result['reply']
getclosestpeerDeferred.addCallback(getclosestpeered)
return getclosestpeerDeferred
def dosendtotg(self,address,port,hashvalue1,stopnumber1,length1,guid1,ts1):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
sendtotgDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a sendtotg command to ",dest
return ampProto.callRemote(Sendtotg, hashvalue=hashvalue1, stopnumber=stopnumber1, length=length1,guid=guid1,ts=ts1,guid1=GUID,address=self.address,port=self.port)
sendtotgDeferred.addCallback(connected)
def handleFailure(f):
# have to add reset tour because if it reach here than something went wrong with the tour
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the sendtotg message"
ut.messagefail(message, guid1)
ut.updateflag(guid1)
return False
sendtotgDeferred.addErrback(handleFailure)
def sendtotged(result):
print frm+" has received a sendttg reply from ", dest
print "result = ",result['reply']
return result['reply']
sendtotgDeferred.addCallback(sendtotged)
return sendtotgDeferred
def dosendpublickey(self,address,port):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
sendpublickeyDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a sendpublic message to ",dest
return ampProto.callRemote(Sendpublickey,guid=GUID,address=self.address,port=self.port)
sendpublickeyDeferred.addCallback(connected)
def handleFailure(f):
# have to add reset tour because if it reach here than something went wrong with the tour
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the sendpublic message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
return False
sendpublickeyDeferred.addErrback(handleFailure)
def sendpublickeyed(result):
print frm+" has received a sendpublic reply from ",dest
print "result =",result['reply']
return result['reply']
sendpublickeyDeferred.addCallback(sendpublickeyed)
return sendpublickeyDeferred
def dogetnumtb(self,address,port):
global GUID
frm=str(self.address)+":"+str(self.port)
dest=str(address)+":"+str(port)
destination = TCP4ClientEndpoint(reactor, address, port)
getnumtbDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a getnumtb message to ",dest
return ampProto.callRemote(Getnumtb,guid=GUID,address=self.address,port=self.port)
getnumtbDeferred.addCallback(connected)
def handleFailure(f):
# have to add reset tour because if it reach here than something went wrong with the tour
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the getnumtb message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
return False
getnumtbDeferred.addErrback(handleFailure)
def getnumtbed(result):
print frm+" has received a getnumtb reply from ",dest
print "result = ",result['reply']
return result['reply']
getnumtbDeferred.addCallback(getnumtbed)
return getnumtbDeferred
def dogetnewaddress(self,address,port,account1):
global GUID
frm=str(self.address)+":"+str(self.port)
dest=str(address)+":"+str(port)
destination = TCP4ClientEndpoint(reactor, address, port)
getnewaddressDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a getnewaddress message to ",dest
return ampProto.callRemote(Getnewaddress,guid=GUID,address=self.address,port=self.port,account=account1)
getnewaddressDeferred.addCallback(connected)
def handleFailure(f):
# have to add reset tour because if it reach here than something went wrong with the tour
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the getnewaddress message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
return False
getnewaddressDeferred.addErrback(handleFailure)
def getnewaddressed(result):
print frm+" has received a getnewaddres reply from ",dest
print "result = ",result['reply']
return result['reply']
getnewaddressDeferred.addCallback(getnewaddressed)
return getnewaddressDeferred
def doencryptdata(self,address,port,data1):
global GUID
frm=str(self.address)+":"+str(self.port)
dest=str(address)+":"+str(port)
destination = TCP4ClientEndpoint(reactor, address, port)
encryptdataDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a encryptdata message to ",dest
return ampProto.callRemote(Encryptdata,guid=GUID,address=self.address,port=self.port,data=data1)
encryptdataDeferred.addCallback(connected)
def handleFailure(f):
# have to add reset tour because if it reach here than something went wrong with the tour
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the encryptdata message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
return False
encryptdataDeferred.addErrback(handleFailure)
def encryptdataed(result):
print frm+" has received a encryptdata reply from ",dest
print "result = ",result['reply']
return result['reply']
encryptdataDeferred.addCallback(encryptdataed)
return encryptdataDeferred
def doupdatetb(self,address,port,thash1):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
updatetbDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a updatetb message to ",dest
return ampProto.callRemote(Updatetb,thash=thash1,guid=GUID,address=self.address,port=self.port)
updatetbDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the updatetb message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
return False
updatetbDeferred.addErrback(handleFailure)
def updatetbed(result):
# store transaction and repeat if necessay
print frm+" has received an updatetb reply from ",dest
print "result = ",result['reply']
return result['reply']
updatetbDeferred.addCallback(updatetbed)
return updatetbDeferred
def doboardcasttrans(self,address,port,txid1,thash1,ts1):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
boardcasttransDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a boardcasttrans message to ",dest
return ampProto.callRemote(Boardcasttrans,txid=txid1,thash=thash1,ts=ts1,guid=GUID, address=self.address,port=self.port)
boardcasttransDeferred.addCallback(connected)
def handleFailure(f):
# have to add reset tour because if it reach here than something went wrong with the tour
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the boardcasttrans message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
return False
boardcasttransDeferred.addErrback(handleFailure)
def boardcasttransed(result):
print frm+" has received a boardcasttrans reply from ",dest
print "result = ",result['reply']
return result['reply']
boardcasttransDeferred.addCallback(boardcasttransed)
return boardcasttransDeferred
def dogeneratecoin(self,address,port):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
generatecoinDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a generatecoin message to ",dest
return ampProto.callRemote(Generatecoin,guid=GUID,address=self.address,port=self.port)
generatecoinDeferred.addCallback(connected)
def handleFailure(f):
# have to add reset tour because if it reach here than something went wrong with the tour
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the generatecoin message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
return False
generatecoinDeferred.addErrback(handleFailure)
def generatecoined(result):
print frm+" has received a generatecoin reply from ",dest
print "result = ",result['reply']
return result['reply']
generatecoinDeferred.addCallback(generatecoined)
return generatecoinDeferred
def doacceptcoin(self,address,port,txid1,thash1,ts1):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
acceptcoinDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a acceptcoin message to ",dest
return ampProto.callRemote(Acceptcoin,txid=txid1,thash=thash1,ts=ts1,guid=GUID, address=self.address,port=self.port)
acceptcoinDeferred.addCallback(connected)
def handleFailure(f):
# have to add reset tour because if it reach here than something went wrong with the tour
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the acceptcoin message"
ut.messagefail(message, guid)
ut.updateflag(guid)
return False
acceptcoinDeferred.addErrback(handleFailure)
def acceptcoined(result):
print frm+" has received an acceptcoin reply from ",dest
print "result = ",result['reply']
return result['reply']
acceptcoinDeferred.addCallback(acceptcoined)
return acceptcoinDeferred
def dogetleader(self,address1,port1,guid1):
global GUID
dest=str(address1)+":"+str(port1)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address1, port1)
getleaderDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a getleader message to ",dest
return ampProto.callRemote(Getleader,clientaddress=self.address,clientport=self.port,guid=guid1,address=self.address,port=self.port)
getleaderDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the getleader message"
ut.messagefail(message, guid1)
ut.updateflag(guid1)
return False
getleaderDeferred.addErrback(handleFailure)
def getleadered(result):
print frm+" has received a getleader reply from ",dest
print "result = ",result['reply']
return result['reply']
getleaderDeferred.addCallback(getleadered)
return getleaderDeferred
def dosetleader(self,address1,port1,clientaddress1,clientport1,guid1):
global GUID
dest=str(address1)+":"+str(port1)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address1, port1)
setleaderDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a setleader message to ",dest
return ampProto.callRemote(Setleader,clientaddress=clientaddress1,clientport=clientport1,guid=guid1,address=self.address,port=self.port)
setleaderDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the setleader message"
ut.messagefail(message, guid1)
ut.updateflag(guid1)
return False
setleaderDeferred.addErrback(handleFailure)
def setleadered(result):
print frm+" has received a setleader reply from ",dest
print "result = ",result['reply']
return result['reply']
setleaderDeferred.addCallback(setleadered)
return setleaderDeferred
def dosendtransaction(self,address1,port1,coinhash1,receiveraddress1,message1):
global GUID
dest=str(address1)+":"+str(port1)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address1, port1)
sendtransactionDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a sendtransaction message to ",dest
return ampProto.callRemote(Sendtransaction,guid=GUID,address=self.address,port=self.port,coinhash=coinhash1, receiveraddress=receiveraddress1, message=message1)
sendtransactionDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the sendtransaction message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
return False
sendtransactionDeferred.addErrback(handleFailure)
def sendtransactioned(result):
print frm+" has received a sendtransaction reply from ",dest
print "result = ",result['reply']
return result['reply']
sendtransactionDeferred.addCallback(sendtransactioned)
return sendtransactionDeferred
def dogettransaction(self,address1,port1):
global GUID
dest=str(address1)+":"+str(port1)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address1, port1)
gettransactionDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a gettransaction message to ",dest
return ampProto.callRemote(Gettransaction,guid=GUID,address=self.address,port=self.port)
gettransactionDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the gettransaction message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
return False
gettransactionDeferred.addErrback(handleFailure)
def gettransactioned(result):
print frm+" has received a gettransaction reply from ",dest
print "result = ",result['reply']
return result['reply']
gettransactionDeferred.addCallback(gettransactioned)
return gettransactionDeferred
def dogetpuzzle(self,address,port,guid1,h01,ts1,L1,arraytg1):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
# h0,ts,L,arraytg
destination = TCP4ClientEndpoint(reactor, address, port)
getpuzzleDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a getpuzzle message to ",dest
return ampProto.callRemote(Getpuzzle,guid=guid1,h0=h01,ts=ts1,L=L1,arraytg=arraytg1,address=self.address,port=self.port)
getpuzzleDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the getpuzzle message"
ut.messagefail(message, guid1)
ut.updateflag(guid1)
return False
getpuzzleDeferred.addErrback(handleFailure)
def getpuzzleed(result):
print frm+" has received a getpuzzle reply from ",dest
print "result = ", result['reply']
return result['reply']
getpuzzleDeferred.addCallback(getpuzzleed)
return getpuzzleDeferred
def doverifypuzzle(self,address,port,guid1,h01,hl1):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
verifypuzzleDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a verifypuzzle message to ",dest
return ampProto.callRemote(Verifypuzzle,guid=guid1,h0=h01,hl=hl1,address=self.address,port=self.port)
verifypuzzleDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the verifypuzzle message"
ut.messagefail(message, guid1)
ut.updateflag(guid1)
return False
verifypuzzleDeferred.addErrback(handleFailure)
def verifypuzzleed(result):
print frm+" has received a verifypuzzle reply from ",dest
print "result = ",result['reply']
return result['reply']
verifypuzzleDeferred.addCallback(verifypuzzleed)
return verifypuzzleDeferred
def doleaderinfo(self,address,port,guid1,address1,port1):
global GUID
frm=str(self.address)+":"+str(self.port)
dest=str(address)+":"+str(port)
destination = TCP4ClientEndpoint(reactor, address, port)
leaderinfoDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a leaderinfo message to ",dest
return ampProto.callRemote(Leaderinfo,guid=guid1,address=address1,port=port1)
leaderinfoDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the leaderinfo message"
ut.messagefail(message, guid1)
ut.updateflag(guid1)
return False
leaderinfoDeferred.addErrback(handleFailure)
def leaderinfoed(result):
print frm+" has received a leaderinfo reply frm ",dest
print "result - ",result['reply']
return result['reply']
leaderinfoDeferred.addCallback(leaderinfoed)
return leaderinfoDeferred
def doinitleader(self,address,port):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
initleaderDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a initleader message to ",dest
return ampProto.callRemote(Initleader,message='init',guid=GUID,address=self.address,port=self.port)
initleaderDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the initleader message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
return False
initleaderDeferred.addErrback(handleFailure)
def initleadered(result):
print frm+" has received an initleader reply from ",dest
print "result = ",result['reply']
return result['reply']
initleaderDeferred.addCallback(initleadered)
return initleaderDeferred
def doleave(self,address,port,guid1):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
leaveDeferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a leave message to ",dest
return ampProto.callRemote(Leave,guid=guid1,address=self.address,port=self.port)
leaveDeferred.addCallback(connected)
def handleFailure(f):
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the leave message"
ut.messagefail(message, guid1)
ut.updateflag(guid1)
return False
leaveDeferred.addErrback(handleFailure)
def leaveed(result):
print frm+" has received a leave reply from ",dest
print "result = ",result['reply']
return result['reply']
leaveDeferred.addCallback(leaveed)
return leaveDeferred
def dojoin(self,address,port):
global GUID
dest=str(address)+":"+str(port)
frm=str(self.address)+":"+str(self.port)
destination = TCP4ClientEndpoint(reactor, address, port)
join1Deferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a join message (getguid) to ",dest
return ampProto.callRemote(Getuuid,guid=GUID,address=self.address,port=self.port)
join1Deferred.addCallback(connected)
def handleFailure(f):
# have to add another peer to request information from
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the join (getguid) message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
print "no reply from the getguid message"
return False
join1Deferred.addErrback(handleFailure)
def join1ed(result):
print frm+" has received a join reply (getguid) from ", dest
print "result = ",result['reply']
return result['reply']
join1Deferred.addCallback(join1ed)
join2Deferred = connectProtocol(destination, AMP())
def connected(ampProto):
print frm+" is sending a join message (copydb) to ",dest
return ampProto.callRemote(Copyht,guid=GUID,address=self.address,port=self.port)
join2Deferred.addCallback(connected)
def handleFailure(f):
# have to add another peer to request information from
f.trap(RuntimeError)
ut=utility(self.filename,self.address,self.port)
message = "no reply from the join (copydb) message"
ut.messagefail(message, GUID)
ut.updateflag(GUID)
print "no reply from the copyht message"
return False
join2Deferred.addErrback(handleFailure)
def join2ed(result):
print frm+" has received a join reply (copydb) from ", dest
print "result = ",result['reply']
return result['reply']
join2Deferred.addCallback(join2ed)
def done(result):
print 'Done with join message. reply:', result
dl = defer.DeferredList([join1Deferred, join2Deferred])
dl.addCallback(done)
return dl
@defer.deferredGenerator
def putdata(self,data):
# put received data into the database: guid text, address text, port integer, flag integer, time real
# might want to move this next to the command block for structural reasons
global FNAME
ht=hashtable()
dbpool=ht.tconnectdb(FNAME)
rows=data.split(":")
for i in rows:
row=i.split(",")
guid=row[0]
address=row[1]
port=row[2]
flag=row[3]
time1=row[4]
wfd = defer.waitForDeferred(ht.taddht(dbpool,str(guid),str(address),int(port),int(flag),float(time1)))
yield wfd
return
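# Wire format consumed by putdata: one peer per ':'-separated row with
# comma-separated fields, e.g. "ab12,127.0.0.1,1234,0,1400000000.0:cd34,..."
# (example values are illustrative only).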
class guidedtour:
# Length of tour
MAXL = 10
MINL = 5
# Number of tour guides
MAXTG = 10
MINTG = 5
# Tour Guide list [guid,ks]
tglist=[]
def __init__(self,filename,address,port):
self.filename = filename
self.address = address
self.port = port
def getlength(self):
# L length of the tour
L=random.randint(self.MINL,self.MAXL)
return L
def gettourguides(self):
# The tour guides will be chosen from the leaders hash table
# A suitable range of tour guides will be determine
# process for the leader to choose the tour guides
# 1) The leader will randomly choose the number of tour guides from its hash table (N)
# 2) The leader will send a request for a secret key from each of its tour guide
# 3) The address and secret key from each tour guide will be stored in an array (TG)
# Do not need this method anymore!!!!!
gtlist=[]
N=random.randint(self.MINTG,self.MAXTG)
ht=hashtable()
conn,c=ht.connectdb(self.filename)
peerlist=ht.getallguidht(c)
count1=ht.numht(c)
print "peerlist=",peerlist
for i in range(1,N):
gt = random.randint(0,count1-1)
print "gt=",gt
gtlist.append(peerlist[gt])
return gtlist
def getleader(self):
# process to find leader:
# 1) The client peer will generate a random guid
# 2) The client will send a request to the peer that is closest to this random guid
# 3) That second peer will generate another random guid and send a request to the closest peer to this guid
# 4) This third peer will become the leader and send back to the second peer a secret key
# 5) This secret key, leader's address and its guid will be sent back to the client
# 6) The client will than send the secret key and client guid to the leader to initiate the guided tour
ut=utility(self.filename,self.address,self.port)
zt=Zeit()
guid = ut.generateguid()
d = zt.getclosestpeer(guid,self.address,self.port)  # returns a deferred; the reply still needs to be unpacked
# a command that tells guid1 to choose a leader (maybe use a hash to make sure it came from guid1
return
def sendpuzzle(self):
global Clientaddress,Clientport,Clientguid,GTlist
cc=clientcommands(self.filename,self.address,self.port)
en=encyption()
ks=en.getpublickey(self.filename)
L=self.getlength()
ts=self.gettimestamp()
h0=self.makepuzzle(Clientguid,L,ts,ks)
cc.dogetpuzzle(Clientaddress,int(Clientport),Clientguid,h0,ts,L,GTlist)
#cc.dogetpuzzle('127.0.0.1',1234,'1111','ho1111111',1111.22,5,'arr1arr2')
return
def makepuzzle(self,guid,L,ts,ks):
# h_{0} = hash(A_{x}\; ||\; L\; ||\; ts\; ||\; K_{s})
# Ax - guid of the client
# hash - a hash function will use sha1
# will send back h0, L, array of tour guides address
# will have to figures out ts (coarse timestamp)
data1=str(guid)+":"+str(L)+":"+str(ts)+":"+str(ks)
puzzle=hashlib.sha256(data1).hexdigest()
return puzzle
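# Illustrative example (made-up values): makepuzzle('ab12', 7,
# 1400000000.0, 'PEMKEY') returns sha256("ab12:7:1400000000.0:PEMKEY")
# as a hex digest, which seeds the tour as h0.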
def getpuzzle(self,guid,h0,ts,L,arraytg):
# store the data locally for use by the client for the tour
global Leaderaddress, Leaderport
tourlist=[]
print "Data has been received for the tour guid-"+guid+" h0-"+h0+" ts-"+str(ts)+" L-"+str(L)+" arraytg-"+arraytg
list1=arraytg.split(":")
for line in list1:
list2=line.split(",")
print "list2=",list2
tourlist.append(list2)
self.runtour(h0,L,Leaderaddress,Leaderport,guid,ts,tourlist)
return
def gettourindex(self,hashvalue,N):
# S_{l}=(h_{l-1}\; mod\; N)
# h_{l} = hash(h_{l-1}\; ||\; l\; ||\; L\; ||\; A_{x}\; ||\; ts\; ||\; k_{js})
# N - number of tour guides
tourindex = long(hashvalue,16) % int(N)
return tourindex
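# Illustrative example: with N=5 tour guides and hashvalue='a3f1',
# the next stop index is long('a3f1',16) % 5 = 41969 % 5 = 4.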
def sendtoguide(self,hashvalue,stopnumber,L):
# unused stub; clientcommands.dosendtotg does the actual sending
return
@defer.deferredGenerator
def runtour(self,h0,L,leaderaddress,leaderport,clientguid,ts,tourlist):
hi=h0
cc=clientcommands(self.filename,self.address,self.port)
# visit L stops; each stop is picked from the current hash value
for l in range(1,int(L)+1):
tourindex=self.gettourindex(hi,len(tourlist))
print "tourindex=",tourindex
print "l=",l
print "tourlist=",tourlist[tourindex]
wfd = defer.waitForDeferred(cc.dosendtotg(tourlist[tourindex][1],int(tourlist[tourindex][2]),hi,l,L,clientguid,ts))
yield wfd
hi = str(wfd.getResult())
wfd = defer.waitForDeferred(cc.doverifypuzzle(leaderaddress,leaderport,clientguid,h0,hi))
yield wfd
result = wfd.getResult()
if (result):
# store transaction and boardcast
# send the result back to the client so it can do the same
print "The puzzle is good"
else:
# send a message to the client to rerun the tour
print "The puzzle is no good"
return
def tourguideverify(self,hashvalue,stopnumber,length,guid,ts):
en = encyption()
ks=en.getpublickey(self.filename)
hashstr=str(hashvalue)+":"+str(stopnumber)+":"+str(length)+":"+str(guid)+":"+str(ts)+":"+str(ks)
print "hashstr=",hashstr
result=hashlib.sha256(hashstr).hexdigest()
return result
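# Each stop extends the chain the same way, e.g. (illustrative values):
#   h1 = sha256("h0:1:7:clientguid:1400000000.0:PEMKEY")
# so the leader can replay the whole walk from h0 and the stored keys.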
def finishtour(self,firsthash,lasthash):
# cleanup the tour
return
def verifypuzzle(self,guid,firsthash,lasthash):
# leader table that holds: sharedkeys, address, port, guid, timestamp
# might want to move this to a tread
# Retrieve L, ts and clientguid from storage
# Do not need this method anymore
flag=0
keylist=[]
h0=firsthash
ht=hashtable()
N=ht.numtg()
keylist=ht.getkeys()
for i in range(0,L):
tourindex=self.gettourindex(h0,N)
key=keylist[tourindex]
h0=self.tourguideverify(h0,i+1,L,clientguid,key,ts)
ht.deleteleader()
if (h0==lasthash):
result=True
#store the transaction and boardcast it
else:
result=False
return result
def gettimestamp(self):
# Ts course timestamp
time1=time.time()
return time1
@defer.deferredGenerator
def getsharedsecret(self,gtlist):
# Ks short live secret
global GTlist
GTlist=""
ht=hashtable()
cc=clientcommands(self.filename,self.address,self.port)
timestamp=self.gettimestamp()
dbpool=ht.tconnectdb(self.filename)
for guid in gtlist:
wfd = defer.waitForDeferred(ht.tgetaddress(dbpool,guid))
yield wfd
address,port = wfd.getResult()
GTlist=GTlist+str(guid)+","+str(address)+","+str(port)+":"
# do sentpubkey
#sharedkey=cc.dosendpublickey(address,port)
wfd = defer.waitForDeferred(cc.dosendpublickey(address,port))
yield wfd
sharedkey = str(wfd.getResult())
# store the secret key in the local database
wfd = defer.waitForDeferred(ht.taddleader(dbpool,sharedkey, address, port, guid, timestamp))
yield wfd
#ht.addleader(conn,c,sharedkey, address, port, guid, timestamp)
GTlist=GTlist[:-1]
self.sendpuzzle()
return
class controller: # this works yeah
# every hour ping to say i am still connected to the network
# every hour do an update transaction request
# every day post to the websites for bootstrapping/peer location (also when first logging into the network)
def __init__(self,filename,address,port):
self.filename = filename
self.address = address
self.port = port
def startupdatetranstimer(self):
        t = task.LoopingCall(self.dotimedupdatetrans)
        t.start(20) # call every 20 seconds while testing; use 3600 for hourly
        # t.stop() will stop the looping calls
return t
    def stopupdatetranstimer(self,t):
t.stop()
return t
@defer.deferredGenerator
def dotimedupdatetrans(self):
# might have to lock database to make sure data is not corrupted
#threading.Timer(5.0, timedping).start()
        # might want to limit the amount of open connections to some maximum value
print "Update Transaction Block timer event"
cc=clientcommands()
ht=hashtable()
dbpool=ht.tconnectdb(self.filename)
#conn,c=ht.connectdb(FNAME)
wfd = defer.waitForDeferred(ht.tgetallguidht(dbpool))
yield wfd
guidlist = wfd.getResult()
#guidlist = ht.getallguidht(c)
        for guid in guidlist:
            wfd = defer.waitForDeferred(ht.tgetaddress(dbpool,guid))
            yield wfd
            address,port = wfd.getResult()
            wfd = defer.waitForDeferred(cc.doping(str(address),int(port),guid))
            yield wfd
            result = wfd.getResult()
ht.tclosedb(dbpool)
return
def startpingtimer(self):
t = task.LoopingCall(self.dotimedping)
        t.start(20) # call every 20 seconds while testing; use 3600 for hourly
        # t.stop() will stop the looping calls
return t
    def stoppingtimer(self,t):
t.stop()
return t
@defer.deferredGenerator
def dotimedping(self):
# might have to lock database to make sure data is not corrupted
#threading.Timer(5.0, timedping).start()
        # might want to limit the amount of open connections to some maximum value
print "Update Ping timer event"
cc=clientcommands()
ht=hashtable()
dbpool=ht.tconnectdb(self.filename)
#conn,c=ht.connectdb(FNAME)
wfd = defer.waitForDeferred(ht.tgetallguidht(dbpool))
yield wfd
guidlist = wfd.getResult()
#guidlist = ht.getallguidht(c)
        for guid in guidlist:
            wfd = defer.waitForDeferred(ht.tgetaddress(dbpool,guid))
            yield wfd
            address,port = wfd.getResult()
            wfd = defer.waitForDeferred(cc.doping(str(address),int(port),guid))
            yield wfd
            result = wfd.getResult()
        ht.tclosedb(dbpool)
return
def timedping(self):
reactor.callInThread(self.dotimedping)
return
def printmenu():
print "==========AMP Commands==========="
print "0) Quit"
print "1) Test ping command"
print "2) Test getguid command"
print "3) Test copyht command"
print "4) Test getclosest command"
print "5) Test sendpublickey command"
print "6) Test join command"
print "7) Test sendtotg command"
print "8) Test boardcasttrans command"
print "9) Test generatecoin command"
print "10)Test getnumtb command"
print "11)Test getleader command"
print "12)Test setleader command"
print "13)Test getpuzzle command"
print "14)Test verifypuzzle command"
print "15)Test acceptcoin command"
print "16)Test updatetb command"
print "17)Test leave command"
print "18)Test leaderinfo command"
print "19)Test initleader command"
print "99) to see the menu"
def startservers():
# Start the amp server
global PORT
port1=raw_input("What port do you want the amp server to listen?\n")
PORT=int(port1)
pf = Factory()
pf.protocol = Zeit
reactor.listenTCP(PORT, pf)
print "AMP servers has started ...."
@defer.deferredGenerator
def docommand(str1):
global ALIVE
arglist=[]
cc=clientcommands(FNAME,ADDRESS,PORT)
ut=utility(FNAME,ADDRESS,PORT)
ht=hashtable()
randomguid=ut.generateguid()
dbpool=ht.tconnectdb(FNAME)
wfd = defer.waitForDeferred(ht.tgetguid(dbpool))
yield wfd
guid = str(wfd.getResult())
ht.tclosedb(dbpool)
comlist=str1.split(":")
com1=str(comlist[0])
address=str(comlist[1])
port=int(comlist[2])
if (com1=="0"):
ALIVE=0
print "Quiting..."
reactor.stop()
elif (com1=="1"):
cc.doping(address,port,guid)
elif (com1=="2"):
cc.dogetguid(address,port)
elif (com1=="3"):
cc.docopyht(address,port)
elif (com1=="4"):
cc.dogetclosest(address,port,randomguid)
elif (com1=="5"):
cc.dosendpublickey(address,port)
elif (com1=="6"):
cc.dojoin(address,port)
elif (com1=="7"):
cc.dosendtotg(address,port,'11111111111111111111111111',2,4,'2222',3333)
elif (com1=="8"):
cc.doboardcasttrans(address,port,'7','th966',111.12)
elif (com1=="9"):
cc.dogeneratecoin(address,port)
elif (com1=="10"):
cc.dogetnumtb(address,port)
elif (com1=="11"):
cc.dogetleader(address,port,guid)
elif (com1=="12"):
cc.dosetleader(address,port,guid)
elif (com1=="13"):
cc.dogetpuzzle(address,port,'1111','ho1111111',1111.22,5,'arr1arr2')
elif (com1=="14"):
cc.doverifypuzzle(address,port,'2222','h11111','h222222')
elif (com1=="15"):
cc.doacceptcoin(address,port,'2','3333',111.11)
elif (com1=="16"):
cc.doupdatetb(address,port,'4444')
elif (com1=="17"):
cc.doleave(address,port,guid)
elif (com1=="18"):
cc.doleaderinfo(address,port,'2222','127.0.0.1',1234)
elif (com1=="19"):
cc.doinitleader(address,port)
elif (com1=="99"):
printmenu()
else:
print "Did not recognized the command"
return
def getinput():
# Get input for menu
global ALIVE
if (ALIVE==1):
com1=raw_input("What command do you want to test?\n")
if (int(com1)>0 and int(com1)<99):
address=raw_input("What is the address for this command?\n")
flag1=False
while (flag1==False):
port=raw_input("What is the port for this command?\n")
flag1=port.isdigit()
if (flag1==False):
print "The port has to be an integer"
str1=str(com1)+":"+str(self.address)+":"+str(self.port)
else:
str1=str(com1)+":1:1"
return str1
def runagain(d):
runinput()
return
def runinput():
d = threads.deferToThread(getinput)
d.addCallback(docommand)
d.addCallback(runagain)
return d
def main():
print "Testing platform for the zeitcoin network"
startservers()
printmenu()
runinput()
reactor.run()
sys.exit(0)
if __name__ == '__main__':
main()
| mit | -1,211,593,257,560,641,300 | 35.476214 | 223 | 0.71134 | false |
annoviko/pyclustering | pyclustering/cluster/examples/somsc_examples.py | 1 | 2482 | """!
@brief Examples of usage and demonstration of abilities of SOM-SC algorithm in cluster analysis.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.samples.definitions import SIMPLE_SAMPLES, FCPS_SAMPLES;
from pyclustering.cluster import cluster_visualizer;
from pyclustering.cluster.somsc import somsc;
from pyclustering.utils import read_sample;
from pyclustering.utils import timedcall;
def template_clustering(path, amount_clusters, epouch = 100, ccore = True):
sample = read_sample(path);
somsc_instance = somsc(sample, amount_clusters, epouch, ccore);
(ticks, _) = timedcall(somsc_instance.process);
clusters = somsc_instance.get_clusters();
print("Sample: ", path, "\t\tExecution time: ", ticks, "\n");
visualizer = cluster_visualizer();
visualizer.append_clusters(clusters, sample);
visualizer.show();
def cluster_sample1():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, 2);
def cluster_sample2():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, 3);
def cluster_sample3():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, 4);
def cluster_sample4():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, 5);
def cluster_sample5():
template_clustering(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, 6);
def cluster_elongate():
template_clustering(SIMPLE_SAMPLES.SAMPLE_ELONGATE, 2);
def cluster_lsun():
template_clustering(FCPS_SAMPLES.SAMPLE_LSUN, 3);
def cluster_target():
template_clustering(FCPS_SAMPLES.SAMPLE_TARGET, 6);
def cluster_two_diamonds():
template_clustering(FCPS_SAMPLES.SAMPLE_TWO_DIAMONDS, 2);
def cluster_wing_nut():
template_clustering(FCPS_SAMPLES.SAMPLE_WING_NUT, 2);
def cluster_chainlink():
template_clustering(FCPS_SAMPLES.SAMPLE_CHAINLINK, 2);
def cluster_hepta():
template_clustering(FCPS_SAMPLES.SAMPLE_HEPTA, 7);
def cluster_tetra():
template_clustering(FCPS_SAMPLES.SAMPLE_TETRA, 4);
def cluster_engy_time():
template_clustering(FCPS_SAMPLES.SAMPLE_ENGY_TIME, 2);
cluster_sample1();
cluster_sample2();
cluster_sample3();
cluster_sample4();
cluster_sample5();
cluster_elongate();
cluster_lsun();
cluster_target();
cluster_two_diamonds();
cluster_wing_nut();
cluster_chainlink();
cluster_hepta();
cluster_tetra();
cluster_engy_time(); | gpl-3.0 | -6,543,249,870,102,792,000 | 25.296703 | 96 | 0.696213 | false |
Spiderlover/Toontown | toontown/estate/DistributedAnimatedStatuary.py | 6 | 1976 | from pandac.PandaModules import NodePath
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.estate import DistributedStatuary
from toontown.estate import GardenGlobals
from direct.actor import Actor
class DistributedAnimatedStatuary(DistributedStatuary.DistributedStatuary):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedAnimatedStatuary')
def __init__(self, cr):
self.notify.debug('constructing DistributedAnimatedStatuary')
DistributedStatuary.DistributedStatuary.__init__(self, cr)
def loadModel(self):
self.rotateNode = self.plantPath.attachNewNode('rotate')
self.model = Actor.Actor()
animPath = self.modelPath + self.anims[1]
self.model.loadModel(self.modelPath + self.anims[0])
self.model.loadAnims(dict([[self.anims[1], animPath]]))
colNode = self.model.find('**/+CollisionNode')
if self.typeIndex == 234:
colNode.setScale(0.5)
if not colNode.isEmpty():
score, multiplier = ToontownGlobals.PinballScoring[ToontownGlobals.PinballStatuary]
if self.pinballScore:
score = self.pinballScore[0]
multiplier = self.pinballScore[1]
scoreNodePath = NodePath('statuary-%d-%d' % (score, multiplier))
colNode.setName('statuaryCol')
scoreNodePath.reparentTo(colNode.getParent())
colNode.reparentTo(scoreNodePath)
self.model.setScale(self.worldScale)
self.model.reparentTo(self.rotateNode)
self.model.loop(self.anims[1])
def setTypeIndex(self, typeIndex):
DistributedStatuary.DistributedStatuary.setTypeIndex(self, typeIndex)
self.anims = GardenGlobals.PlantAttributes[typeIndex]['anims']
def setupShadow(self):
if self.typeIndex == 234:
pass
else:
            DistributedStatuary.DistributedStatuary.setupShadow(self)
| mit | 1,556,222,840,028,331,500 | 42.911111 | 95 | 0.693826 | false |
tempbottle/Nuitka | nuitka/nodes/ConstantRefNodes.py | 1 | 8791 | # Copyright 2015, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Node for constant expressions. Can be all common built-in types.
"""
from logging import warning
from nuitka.__past__ import iterItems, unicode # pylint: disable=W0622
from nuitka.Constants import (
getConstantIterationLength,
isConstant,
isHashable,
isIndexConstant,
isIterableConstant,
isMutable,
isNumberConstant
)
from nuitka.Options import isDebug
from .NodeBases import CompileTimeConstantExpressionMixin, NodeBase
class ExpressionConstantRef(CompileTimeConstantExpressionMixin, NodeBase):
kind = "EXPRESSION_CONSTANT_REF"
user_provided = False
def __init__(self, constant, source_ref, user_provided = False):
NodeBase.__init__(self, source_ref = source_ref)
CompileTimeConstantExpressionMixin.__init__(self)
assert isConstant(constant), repr(constant)
self.constant = constant
# Memory saving method, have the attribute only where necessary.
if user_provided:
self.user_provided = user_provided
if not user_provided and isDebug():
try:
size = len(constant)
if type(constant) in (str, unicode):
max_size = 1000
else:
max_size = 256
if size > max_size:
warning(
"Too large constant (%s %d) encountered at %s.",
type(constant),
size,
source_ref.getAsString()
)
except TypeError:
pass
def __repr__(self):
return "<Node %s value %s at %s %s>" % (
self.kind,
self.constant,
self.source_ref.getAsString(),
self.user_provided
)
def getDetails(self):
return {
"constant" : self.constant,
"user_provided" : self.user_provided
}
def getDetailsForDisplay(self):
return {
"constant" : repr(self.constant),
"user_provided" : self.user_provided
}
def getDetail(self):
return repr(self.constant)
def computeExpression(self, constraint_collection):
# Cannot compute any further, this is already the best.
return self, None, None
def computeExpressionCall(self, call_node, constraint_collection):
from .NodeMakingHelpers import makeRaiseExceptionReplacementExpression, wrapExpressionWithSideEffects
new_node = wrapExpressionWithSideEffects(
new_node = makeRaiseExceptionReplacementExpression(
expression = self,
exception_type = "TypeError",
exception_value = "'%s' object is not callable" % type(self.constant).__name__
),
old_node = call_node,
side_effects = call_node.extractPreCallSideEffects()
)
return new_node, "new_raise", "Predicted call of constant value to exception raise."
def getCompileTimeConstant(self):
return self.constant
getConstant = getCompileTimeConstant
def isMutable(self):
return isMutable(self.constant)
def isKnownToBeHashable(self):
return isHashable(self.constant)
def isNumberConstant(self):
return isNumberConstant(self.constant)
def isIndexConstant(self):
return isIndexConstant(self.constant)
def isStringConstant(self):
return type(self.constant) is str
def isIndexable(self):
return self.constant is None or self.isNumberConstant()
def isKnownToBeIterable(self, count):
if isIterableConstant(self.constant):
return count is None or \
getConstantIterationLength(self.constant) == count
else:
return False
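    # For example, a constant tuple (1, 2, 3) is known to be iterable when the
    # requested count is None or 3, but not when it is 2; a non-iterable
    # constant such as an int never satisfies this check.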
def isKnownToBeIterableAtMin(self, count):
length = self.getIterationLength()
return length is not None and length >= count
def canPredictIterationValues(self):
return self.isKnownToBeIterable(None)
def getIterationValue(self, count):
assert count < len(self.constant)
return ExpressionConstantRef(
constant = self.constant[count],
source_ref = self.source_ref
)
def getIterationValues(self):
source_ref = self.getSourceReference()
return tuple(
ExpressionConstantRef(
constant = value,
source_ref = source_ref,
user_provided = self.user_provided
)
for value in
self.constant
)
def isMapping(self):
return type(self.constant) is dict
def isMappingWithConstantStringKeys(self):
assert self.isMapping()
for key in self.constant:
if type(key) not in (str, unicode):
return False
return True
def getMappingPairs(self):
assert self.isMapping()
pairs = []
source_ref = self.getSourceReference()
for key, value in iterItems(self.constant):
            pairs.append(
                (
                    ExpressionConstantRef(
                        constant = key,
                        source_ref = source_ref
                    ),
                    ExpressionConstantRef(
                        constant = value,
                        source_ref = source_ref
                    )
                )
            )
return pairs
def getMappingStringKeyPairs(self):
assert self.isMapping()
pairs = []
source_ref = self.getSourceReference()
for key, value in iterItems(self.constant):
pairs.append(
(
key,
ExpressionConstantRef(
constant = value,
source_ref = source_ref
)
)
)
return pairs
def isBoolConstant(self):
return type(self.constant) is bool
def mayHaveSideEffects(self):
# Constants have no side effects
return False
def extractSideEffects(self):
# Constants have no side effects
return ()
def mayRaiseException(self, exception_type):
# Constants won't raise any kind of exception.
return False
def getIntegerValue(self):
if self.isNumberConstant():
return int(self.constant)
else:
return None
def getStringValue(self):
if self.isStringConstant():
return self.constant
else:
return None
def getIterationLength(self):
if isIterableConstant(self.constant):
return getConstantIterationLength(self.constant)
else:
return None
def isIterableConstant(self):
return isIterableConstant(self.constant)
def getStrValue(self):
if type(self.constant) is str:
# Nothing to do.
return self
else:
try:
return ExpressionConstantRef(
constant = str(self.constant),
user_provided = self.user_provided,
source_ref = self.getSourceReference(),
)
except UnicodeEncodeError:
# Unicode constants may not be possible to encode.
return None
def computeExpressionIter1(self, iter_node, constraint_collection):
if type(self.constant) in (list, set, frozenset, dict):
result = ExpressionConstantRef(
constant = tuple(self.constant),
user_provided = self.user_provided,
source_ref = self.getSourceReference()
)
self.replaceWith(result)
return (
iter_node,
"new_constant", """\
Iteration over constant %s changed to tuple.""" % type(self.constant).__name__
)
return iter_node, None, None
| apache-2.0 | 5,454,924,322,633,401,000 | 28.599327 | 109 | 0.579911 | false |
fabriziodemaria/luigi | luigi/postgres.py | 13 | 13451 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implements a subclass of :py:class:`~luigi.target.Target` that writes data to Postgres.
Also provides a helper task to copy data into a Postgres table.
"""
import datetime
import logging
import re
import tempfile
from luigi import six
import luigi
from luigi.contrib import rdbms
logger = logging.getLogger('luigi-interface')
try:
import psycopg2
import psycopg2.errorcodes
import psycopg2.extensions
except ImportError:
logger.warning("Loading postgres module without psycopg2 installed. Will crash at runtime if postgres functionality is used.")
class MultiReplacer(object):
"""
Object for one-pass replace of multiple words
Substituted parts will not be matched against other replace patterns, as opposed to when using multipass replace.
The order of the items in the replace_pairs input will dictate replacement precedence.
Constructor arguments:
replace_pairs -- list of 2-tuples which hold strings to be replaced and replace string
Usage:
.. code-block:: python
>>> replace_pairs = [("a", "b"), ("b", "c")]
>>> MultiReplacer(replace_pairs)("abcd")
'bccd'
>>> replace_pairs = [("ab", "x"), ("a", "x")]
>>> MultiReplacer(replace_pairs)("ab")
'x'
>>> replace_pairs.reverse()
>>> MultiReplacer(replace_pairs)("ab")
'xb'
"""
# TODO: move to misc/util module
def __init__(self, replace_pairs):
"""
Initializes a MultiReplacer instance.
:param replace_pairs: list of 2-tuples which hold strings to be replaced and replace string.
:type replace_pairs: tuple
"""
replace_list = list(replace_pairs) # make a copy in case input is iterable
self._replace_dict = dict(replace_list)
pattern = '|'.join(re.escape(x) for x, y in replace_list)
self._search_re = re.compile(pattern)
def _replacer(self, match_object):
# this method is used as the replace function in the re.sub below
return self._replace_dict[match_object.group()]
def __call__(self, search_string):
# using function replacing for a per-result replace
return self._search_re.sub(self._replacer, search_string)
# these are the escape sequences recognized by postgres COPY
# according to http://www.postgresql.org/docs/8.1/static/sql-copy.html
default_escape = MultiReplacer([('\\', '\\\\'),
('\t', '\\t'),
('\n', '\\n'),
('\r', '\\r'),
('\v', '\\v'),
('\b', '\\b'),
('\f', '\\f')
])
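# For example, default_escape(u'a\tb\nc') returns u'a\\tb\\nc', so embedded tab
# and newline characters cannot break the tab-separated COPY stream built below.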
class PostgresTarget(luigi.Target):
"""
Target for a resource in Postgres.
This will rarely have to be directly instantiated by the user.
"""
marker_table = luigi.configuration.get_config().get('postgres', 'marker-table', 'table_updates')
# Use DB side timestamps or client side timestamps in the marker_table
use_db_timestamps = True
def __init__(
self, host, database, user, password, table, update_id, port=None
):
"""
Args:
host (str): Postgres server address. Possibly a host:port string.
database (str): Database name
user (str): Database user
password (str): Password for specified user
update_id (str): An identifier for this data set
port (int): Postgres server port.
"""
if ':' in host:
self.host, self.port = host.split(':')
else:
self.host = host
self.port = port
self.database = database
self.user = user
self.password = password
self.table = table
self.update_id = update_id
def touch(self, connection=None):
"""
Mark this update as complete.
Important: If the marker table doesn't exist, the connection transaction will be aborted
and the connection reset.
Then the marker table will be created.
"""
self.create_marker_table()
if connection is None:
# TODO: test this
connection = self.connect()
connection.autocommit = True # if connection created here, we commit it here
if self.use_db_timestamps:
connection.cursor().execute(
"""INSERT INTO {marker_table} (update_id, target_table)
VALUES (%s, %s)
""".format(marker_table=self.marker_table),
(self.update_id, self.table))
else:
connection.cursor().execute(
"""INSERT INTO {marker_table} (update_id, target_table, inserted)
VALUES (%s, %s, %s);
""".format(marker_table=self.marker_table),
(self.update_id, self.table,
datetime.datetime.now()))
# make sure update is properly marked
assert self.exists(connection)
def exists(self, connection=None):
if connection is None:
connection = self.connect()
connection.autocommit = True
cursor = connection.cursor()
try:
cursor.execute("""SELECT 1 FROM {marker_table}
WHERE update_id = %s
LIMIT 1""".format(marker_table=self.marker_table),
(self.update_id,)
)
row = cursor.fetchone()
except psycopg2.ProgrammingError as e:
if e.pgcode == psycopg2.errorcodes.UNDEFINED_TABLE:
row = None
else:
raise
return row is not None
def connect(self):
"""
Get a psycopg2 connection object to the database where the table is.
"""
connection = psycopg2.connect(
host=self.host,
port=self.port,
database=self.database,
user=self.user,
password=self.password)
connection.set_client_encoding('utf-8')
return connection
def create_marker_table(self):
"""
Create marker table if it doesn't exist.
Using a separate connection since the transaction might have to be reset.
"""
connection = self.connect()
connection.autocommit = True
cursor = connection.cursor()
if self.use_db_timestamps:
sql = """ CREATE TABLE {marker_table} (
update_id TEXT PRIMARY KEY,
target_table TEXT,
inserted TIMESTAMP DEFAULT NOW())
""".format(marker_table=self.marker_table)
else:
sql = """ CREATE TABLE {marker_table} (
update_id TEXT PRIMARY KEY,
target_table TEXT,
inserted TIMESTAMP);
""".format(marker_table=self.marker_table)
try:
cursor.execute(sql)
except psycopg2.ProgrammingError as e:
if e.pgcode == psycopg2.errorcodes.DUPLICATE_TABLE:
pass
else:
raise
connection.close()
def open(self, mode):
raise NotImplementedError("Cannot open() PostgresTarget")
class CopyToTable(rdbms.CopyToTable):
"""
Template task for inserting a data set into Postgres
Usage:
Subclass and override the required `host`, `database`, `user`,
`password`, `table` and `columns` attributes.
To customize how to access data from an input task, override the `rows` method
with a generator that yields each row as a tuple with fields ordered according to `columns`.
"""
def rows(self):
"""
Return/yield tuples or lists corresponding to each row to be inserted.
"""
with self.input().open('r') as fobj:
for line in fobj:
yield line.strip('\n').split('\t')
def map_column(self, value):
"""
Applied to each column of every row returned by `rows`.
Default behaviour is to escape special characters and identify any self.null_values.
"""
if value in self.null_values:
return r'\\N'
else:
return default_escape(six.text_type(value))
# everything below will rarely have to be overridden
def output(self):
"""
Returns a PostgresTarget representing the inserted dataset.
Normally you don't override this.
"""
return PostgresTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.table,
update_id=self.update_id
)
def copy(self, cursor, file):
if isinstance(self.columns[0], six.string_types):
column_names = self.columns
elif len(self.columns[0]) == 2:
column_names = [c[0] for c in self.columns]
else:
raise Exception('columns must consist of column strings or (column string, type string) tuples (was %r ...)' % (self.columns[0],))
cursor.copy_from(file, self.table, null=r'\\N', sep=self.column_separator, columns=column_names)
def run(self):
"""
Inserts data generated by rows() into target table.
If the target table doesn't exist, self.create_table will be called to attempt to create the table.
Normally you don't want to override this.
"""
if not (self.table and self.columns):
raise Exception("table and columns need to be specified")
connection = self.output().connect()
# transform all data generated by rows() using map_column and write data
# to a temporary file for import using postgres COPY
tmp_dir = luigi.configuration.get_config().get('postgres', 'local-tmp-dir', None)
tmp_file = tempfile.TemporaryFile(dir=tmp_dir)
n = 0
for row in self.rows():
n += 1
if n % 100000 == 0:
logger.info("Wrote %d lines", n)
rowstr = self.column_separator.join(self.map_column(val) for val in row)
rowstr += "\n"
tmp_file.write(rowstr.encode('utf-8'))
logger.info("Done writing, importing at %s", datetime.datetime.now())
tmp_file.seek(0)
# attempt to copy the data into postgres
# if it fails because the target table doesn't exist
# try to create it by running self.create_table
for attempt in range(2):
try:
cursor = connection.cursor()
self.init_copy(connection)
self.copy(cursor, tmp_file)
self.post_copy(connection)
except psycopg2.ProgrammingError as e:
if e.pgcode == psycopg2.errorcodes.UNDEFINED_TABLE and attempt == 0:
# if first attempt fails with "relation not found", try creating table
logger.info("Creating table %s", self.table)
connection.reset()
self.create_table(connection)
else:
raise
else:
break
# mark as complete in same transaction
self.output().touch(connection)
# commit and clean up
connection.commit()
connection.close()
tmp_file.close()
class PostgresQuery(rdbms.Query):
"""
Template task for querying a Postgres compatible database
Usage:
Subclass and override the required `host`, `database`, `user`, `password`, `table`, and `query` attributes.
Override the `run` method if your use case requires some action with the query result.
Task instances require a dynamic `update_id`, e.g. via parameter(s), otherwise the query will only execute once
To customize the query signature as recorded in the database marker table, override the `update_id` property.
"""
def run(self):
connection = self.output().connect()
cursor = connection.cursor()
sql = self.query
logger.info('Executing query from task: {name}'.format(name=self.__class__))
cursor.execute(sql)
# Update marker table
self.output().touch(connection)
# commit and close connection
connection.commit()
connection.close()
def output(self):
"""
Returns a PostgresTarget representing the executed query.
Normally you don't override this.
"""
return PostgresTarget(
host=self.host,
database=self.database,
user=self.user,
password=self.password,
table=self.table,
update_id=self.update_id
)
| apache-2.0 | -2,892,266,080,129,253,000 | 33.489744 | 142 | 0.582782 | false |
tianocore/buildtools-BaseTools | Source/Python/UPT/Xml/ModuleSurfaceAreaXml.py | 2 | 37721 | ## @file
# This file is used to parse a Module file of .PKG file
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
ModuleSurfaceAreaXml
'''
from xml.dom import minidom
from Library.String import ConvertNEToNOTEQ
from Library.String import ConvertNOTEQToNE
from Library.String import GetStringOfList
from Library.String import IsMatchArch
from Library.Xml.XmlRoutines import XmlElement
from Library.Xml.XmlRoutines import XmlAttribute
from Library.Xml.XmlRoutines import XmlNode
from Library.Xml.XmlRoutines import XmlList
from Library.Xml.XmlRoutines import CreateXmlElement
from Object.POM.CommonObject import GuidVersionObject
from Object.POM.ModuleObject import BootModeObject
from Object.POM.ModuleObject import DepexObject
from Object.POM.ModuleObject import ModuleObject
from Object.POM.ModuleObject import EventObject
from Object.POM.ModuleObject import HobObject
from Object.POM.ModuleObject import SourceFileObject
from Object.POM.ModuleObject import PackageDependencyObject
from Object.POM.ModuleObject import ExternObject
from Object.POM.ModuleObject import BinaryFileObject
from Object.POM.ModuleObject import AsBuiltObject
from Object.POM.ModuleObject import BinaryBuildFlagObject
from Xml.CommonXml import ClonedFromXml
from Xml.CommonXml import HeaderXml
from Xml.CommonXml import HelpTextXml
from Xml.CommonXml import CommonDefinesXml
from Xml.CommonXml import LibraryClassXml
from Xml.CommonXml import UserExtensionsXml
from Xml.CommonXml import MiscellaneousFileXml
from Xml.CommonXml import FilenameXml
from Xml.GuidProtocolPpiXml import GuidXml
from Xml.GuidProtocolPpiXml import ProtocolXml
from Xml.GuidProtocolPpiXml import PpiXml
from Xml.PcdXml import PcdEntryXml
from Xml.XmlParserMisc import GetHelpTextList
from Library import GlobalData
from Library.Misc import GetSplitValueList
## BinaryFileXml
#
# represent the following XML item
#
# <BinaryFile>
# <Filename
# FileType=" FileType " {1}
# SupArchList=" ArchListType " {0,1}
# FeatureFlag=" FeatureFlagExpression " {0,1} >
# xs:anyURI
# </Filename> {1,}
# <AsBuilt> ... </AsBuilt> {0,}
# </BinaryFile> {1,}
#
class BinaryFileXml(object):
def __init__(self):
self.FileNames = []
self.AsBuiltList = []
self.PatchPcdValues = ''
self.PcdExValues = ''
self.LibraryInstances = ''
self.BuildFlags = ''
def FromXml(self, Item, Key):
if self.FileNames:
pass
BinaryFile = BinaryFileObject()
FilenameList = []
for SubItem in XmlList(Item, '%s/Filename' % Key):
Axml = FilenameXml()
Bxml = Axml.FromXml(SubItem, 'Filename')
FilenameList.append(Bxml)
BinaryFile.SetFileNameList(FilenameList)
if GlobalData.gIS_BINARY_INF:
AsBuiltList = []
for AsBuiltItem in XmlList(Item, '%s/AsBuilt' % Key):
AsBuilt = AsBuiltObject()
PatchPcdValueList = []
for SubItem in XmlList(AsBuiltItem, 'AsBuilt/PatchPcdValue'):
Axml = PcdEntryXml()
Bxml = Axml.FromXml(SubItem, 'PatchPcdValue')
PatchPcdValueList.append(Bxml)
AsBuilt.SetPatchPcdList(PatchPcdValueList)
PcdExValueList = []
for SubItem in XmlList(AsBuiltItem, 'AsBuilt/PcdExValue'):
Axml = PcdEntryXml()
Bxml = Axml.FromXml(SubItem, 'PcdExValue')
PcdExValueList.append(Bxml)
AsBuilt.SetPcdExList(PcdExValueList)
LibraryList = []
for SubItem in XmlList(Item, '%s/AsBuilt/LibraryInstances/GUID' % Key):
GuidVerObj = GuidVersionObject()
GUID = XmlElement(SubItem, 'GUID')
Version = XmlAttribute(XmlNode(SubItem, 'GUID'), 'Version')
GuidVerObj.SetGuid(GUID)
GuidVerObj.SetVersion(Version)
LibraryList.append(GuidVerObj)
if XmlList(Item, '%s/AsBuilt/LibraryInstances' % Key) and not LibraryList:
LibraryList = [None]
AsBuilt.SetLibraryInstancesList(LibraryList)
BuildFlagList = []
for SubItem in XmlList(Item, '%s/AsBuilt/BuildFlags' % Key):
BuildFlag = BuildFlagXml()
BuildFlagList.append(BuildFlag.FromXml2(SubItem, 'BuildFlags'))
AsBuilt.SetBuildFlagsList(BuildFlagList)
AsBuiltList.append(AsBuilt)
BinaryFile.SetAsBuiltList(AsBuiltList)
return BinaryFile
def ToXml(self, BinaryFile, Key):
if self.FileNames:
pass
NodeList = []
FilenameList = BinaryFile.GetFileNameList()
SupportArch = None
for Filename in FilenameList:
Tmp = FilenameXml()
NodeList.append(Tmp.ToXml(Filename, 'Filename'))
SupportArch = Filename.SupArchList
if GlobalData.gIS_BINARY_INF:
AsBuildList = BinaryFile.GetAsBuiltList()
PatchPcdValueList = AsBuildList.GetPatchPcdList()
PcdExList = AsBuildList.GetPcdExList()
LibGuidVerList = AsBuildList.GetLibraryInstancesList()
BuildFlagList = AsBuildList.GetBuildFlagsList()
AsBuiltNodeList = []
for Pcd in PatchPcdValueList:
if IsMatchArch(Pcd.SupArchList, SupportArch):
Tmp = PcdEntryXml()
AsBuiltNodeList.append(Tmp.ToXml4(Pcd, 'PatchPcdValue'))
for Pcd in PcdExList:
if IsMatchArch(Pcd.SupArchList, SupportArch):
Tmp = PcdEntryXml()
AsBuiltNodeList.append(Tmp.ToXml4(Pcd, 'PcdExValue'))
GuiVerElemList = []
for LibGuidVer in LibGuidVerList:
GuiVerElem = \
CreateXmlElement('GUID', LibGuidVer.GetLibGuid(), [], [['Version', LibGuidVer.GetLibVersion()]])
GuiVerElemList.append(GuiVerElem)
if len(GuiVerElemList) > 0:
LibGuidVerElem = CreateXmlElement('LibraryInstances', '', GuiVerElemList, [])
AsBuiltNodeList.append(LibGuidVerElem)
for BuildFlag in BuildFlagList:
Tmp = BuildFlagXml()
Elem = CreateXmlElement('BuildFlags', ''.join(BuildFlag), [], [])
AsBuiltNodeList.append(Elem)
if len(AsBuiltNodeList) > 0:
Element = CreateXmlElement('AsBuilt', '', AsBuiltNodeList, [])
NodeList.append(Element)
Root = CreateXmlElement('%s' % Key, '', NodeList, [])
return Root
def __str__(self):
Str = "BinaryFiles:"
for Item in self.FileNames:
Str = Str + '\n\t' + str(Item)
for Item in self.PatchPcdValues:
Str = Str + '\n\t' + str(Item)
for Item in self.PcdExValues:
Str = Str + '\n\t' + str(Item)
for Item in self.LibraryInstances:
Str = Str + '\n\t' + str(Item)
for Item in self.BuildFlags:
Str = Str + '\n\t' + str(Item)
return Str
##
# PackageXml
#
class PackageXml(object):
def __init__(self):
self.Description = ''
self.Guid = ''
self.Version = ''
self.CommonDefines = CommonDefinesXml()
def FromXml(self, Item, Key):
self.Description = XmlElement(Item, '%s/Description' % Key)
self.Guid = XmlElement(Item, '%s/GUID' % Key)
self.Version = XmlAttribute(XmlNode(Item, '%s/GUID' % Key), 'Version')
self.CommonDefines.FromXml(XmlNode(Item, '%s' % Key), Key)
PackageDependency = PackageDependencyObject()
PackageDependency.SetPackage(self.Description)
PackageDependency.SetGuid(self.Guid)
PackageDependency.SetVersion(self.Version)
PackageDependency.SetFeatureFlag(ConvertNOTEQToNE(self.CommonDefines.FeatureFlag))
PackageDependency.SetSupArchList(self.CommonDefines.SupArchList)
return PackageDependency
def ToXml(self, PackageDependency, Key):
if self.Guid:
pass
AttributeList = [['SupArchList', GetStringOfList(PackageDependency.GetSupArchList())],
['FeatureFlag', ConvertNEToNOTEQ(PackageDependency.GetFeatureFlag())], ]
Element1 = CreateXmlElement('GUID', PackageDependency.GetGuid(), [],
[['Version', PackageDependency.GetVersion()]])
NodeList = [['Description', PackageDependency.GetPackage()], Element1, ]
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
Str = "Description = %s Guid = %s Version = %s %s" \
% (self.Description, self.Guid, self.Version, self.CommonDefines)
return Str
##
# ExternXml
#
class ExternXml(object):
def __init__(self):
self.CommonDefines = CommonDefinesXml()
self.EntryPoint = ''
self.UnloadImage = ''
self.Constructor = ''
self.Destructor = ''
self.SupModList = ''
self.SupArchList = ''
self.HelpText = []
def FromXml(self, Item, Key):
self.CommonDefines.FromXml(Item, Key)
self.EntryPoint = XmlElement(Item, '%s/EntryPoint' % Key)
self.UnloadImage = XmlElement(Item, '%s/UnloadImage' % Key)
self.Constructor = XmlElement(Item, '%s/Constructor' % Key)
self.Destructor = XmlElement(Item, '%s/Destructor' % Key)
Extern = ExternObject()
Extern.SetEntryPoint(self.EntryPoint)
Extern.SetUnloadImage(self.UnloadImage)
Extern.SetConstructor(self.Constructor)
Extern.SetDestructor(self.Destructor)
if self.CommonDefines.SupModList:
Extern.SetSupModList(self.CommonDefines.SupModList)
if self.CommonDefines.SupArchList:
Extern.SetSupArchList(self.CommonDefines.SupArchList)
return Extern
def ToXml(self, Extern, Key):
if self.HelpText:
pass
NodeList = []
if Extern.GetEntryPoint():
NodeList.append(['EntryPoint', Extern.GetEntryPoint()])
if Extern.GetUnloadImage():
NodeList.append(['UnloadImage', Extern.GetUnloadImage()])
if Extern.GetConstructor():
NodeList.append(['Constructor', Extern.GetConstructor()])
if Extern.GetDestructor():
NodeList.append(['Destructor', Extern.GetDestructor()])
Root = CreateXmlElement('%s' % Key, '', NodeList, [])
return Root
def __str__(self):
Str = "EntryPoint = %s UnloadImage = %s Constructor = %s Destructor = %s %s" \
% (self.EntryPoint, self.UnloadImage, self.Constructor, self.Destructor, self.CommonDefines)
for Item in self.HelpText:
Str = Str + '\n\t' + str(Item)
return Str
##
# DepexXml
#
class DepexXml(object):
def __init__(self):
self.CommonDefines = CommonDefinesXml()
self.Expression = None
self.HelpText = []
def FromXml(self, Item, Key):
if not Item:
return None
self.CommonDefines.FromXml(Item, Key)
self.Expression = XmlElement(Item, '%s/Expression' % Key)
for HelpTextItem in XmlList(Item, '%s/HelpText' % Key):
HelpTextObj = HelpTextXml()
HelpTextObj.FromXml(HelpTextItem, '%s/HelpText' % Key)
self.HelpText.append(HelpTextObj)
Depex = DepexObject()
Depex.SetDepex(self.Expression)
Depex.SetModuleType(self.CommonDefines.SupModList)
Depex.SetSupArchList(self.CommonDefines.SupArchList)
Depex.SetFeatureFlag(self.CommonDefines.FeatureFlag)
Depex.SetHelpTextList(GetHelpTextList(self.HelpText))
return Depex
def ToXml(self, Depex, Key):
if self.HelpText:
pass
AttributeList = [['SupArchList', GetStringOfList(Depex.GetSupArchList())],
['SupModList', Depex.GetModuleType()]]
NodeList = [['Expression', Depex.GetDepex()]]
if Depex.GetHelpText():
Tmp = HelpTextXml()
NodeList.append(Tmp.ToXml(Depex.GetHelpText(), 'HelpText'))
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
Str = "Expression = %s" % (self.Expression)
for Item in self.HelpText:
Str = Str + '\n\t' + str(Item)
return Str
##
# BootModeXml
#
class BootModeXml(object):
def __init__(self):
self.SupportedBootModes = ''
self.CommonDefines = CommonDefinesXml()
self.HelpText = []
def FromXml(self, Item, Key):
self.SupportedBootModes = \
XmlElement(Item, '%s/SupportedBootModes' % Key)
self.CommonDefines.FromXml(Item, Key)
for HelpTextItem in XmlList(Item, '%s/HelpText' % Key):
HelpTextObj = HelpTextXml()
HelpTextObj.FromXml(HelpTextItem, '%s/HelpText' % Key)
self.HelpText.append(HelpTextObj)
BootMode = BootModeObject()
BootMode.SetSupportedBootModes(self.SupportedBootModes)
BootMode.SetUsage(self.CommonDefines.Usage)
BootMode.SetHelpTextList(GetHelpTextList(self.HelpText))
return BootMode
def ToXml(self, BootMode, Key):
if self.HelpText:
pass
AttributeList = [['Usage', BootMode.GetUsage()], ]
NodeList = [['SupportedBootModes', BootMode.GetSupportedBootModes()]]
for Item in BootMode.GetHelpTextList():
Tmp = HelpTextXml()
NodeList.append(Tmp.ToXml(Item, 'HelpText'))
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
Str = "SupportedBootModes = %s %s" % (self.SupportedBootModes, self.CommonDefines)
for Item in self.HelpText:
Str = Str + '\n\t' + str(Item)
return Str
##
# EventXml
#
class EventXml(object):
def __init__(self):
self.EventType = ''
self.Name = ''
self.CommonDefines = CommonDefinesXml()
self.HelpText = []
def FromXml(self, Item, Key):
self.EventType = XmlAttribute(XmlNode(Item, '%s' % Key), 'EventType')
self.Name = XmlElement(Item, '%s' % Key)
self.CommonDefines.FromXml(Item, Key)
for HelpTextItem in XmlList(Item, '%s/HelpText' % Key):
HelpTextObj = HelpTextXml()
HelpTextObj.FromXml(HelpTextItem, '%s/HelpText' % Key)
self.HelpText.append(HelpTextObj)
Event = EventObject()
Event.SetEventType(self.EventType)
Event.SetUsage(self.CommonDefines.Usage)
Event.SetHelpTextList(GetHelpTextList(self.HelpText))
return Event
def ToXml(self, Event, Key):
if self.HelpText:
pass
AttributeList = [['EventType', Event.GetEventType()],
['Usage', Event.GetUsage()],
]
NodeList = []
for Item in Event.GetHelpTextList():
Tmp = HelpTextXml()
NodeList.append(Tmp.ToXml(Item, 'HelpText'))
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
Str = "EventType = %s %s" % (self.EventType, self.CommonDefines)
for Item in self.HelpText:
Str = Str + '\n\t' + str(Item)
return Str
##
# HobXml
#
class HobXml(object):
def __init__(self):
self.HobType = ''
self.Name = ''
self.CommonDefines = CommonDefinesXml()
self.HelpText = []
def FromXml(self, Item, Key):
self.HobType = XmlAttribute(XmlNode(Item, '%s' % Key), 'HobType')
self.Name = XmlElement(Item, '%s' % Key)
self.CommonDefines.FromXml(Item, Key)
for HelpTextItem in XmlList(Item, '%s/HelpText' % Key):
HelpTextObj = HelpTextXml()
HelpTextObj.FromXml(HelpTextItem, '%s/HelpText' % Key)
self.HelpText.append(HelpTextObj)
Hob = HobObject()
Hob.SetHobType(self.HobType)
Hob.SetSupArchList(self.CommonDefines.SupArchList)
Hob.SetUsage(self.CommonDefines.Usage)
Hob.SetHelpTextList(GetHelpTextList(self.HelpText))
return Hob
def ToXml(self, Hob, Key):
if self.Name:
pass
AttributeList = [['HobType', Hob.GetHobType()],
['Usage', Hob.GetUsage()],
['SupArchList', GetStringOfList(Hob.GetSupArchList())], ]
NodeList = []
for Item in Hob.GetHelpTextList():
Tmp = HelpTextXml()
NodeList.append(Tmp.ToXml(Item, 'HelpText'))
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
Str = "HobType = %s %s" % (self.HobType, self.CommonDefines)
for Item in self.HelpText:
Str = Str + '\n\t' + str(Item)
return Str
##
# SourceFileXml
#
class SourceFileXml(object):
def __init__(self):
self.SourceFile = ''
self.ToolChainFamily = ''
self.FileType = ''
self.CommonDefines = CommonDefinesXml()
def FromXml(self, Item, Key):
self.ToolChainFamily = XmlAttribute(Item, 'Family')
self.SourceFile = XmlElement(Item, 'Filename')
self.CommonDefines.FromXml(Item, Key)
self.CommonDefines.FeatureFlag = ConvertNOTEQToNE(self.CommonDefines.FeatureFlag)
SourceFile = SourceFileObject()
SourceFile.SetSourceFile(self.SourceFile)
SourceFile.SetFamily(self.ToolChainFamily)
SourceFile.SetSupArchList(self.CommonDefines.SupArchList)
SourceFile.SetFeatureFlag(self.CommonDefines.FeatureFlag)
return SourceFile
def ToXml(self, SourceFile, Key):
if self.SourceFile:
pass
FeatureFlag = ConvertNEToNOTEQ(SourceFile.GetFeatureFlag())
AttributeList = [['SupArchList', GetStringOfList(SourceFile.GetSupArchList())],
['Family', SourceFile.GetFamily()],
['FeatureFlag', FeatureFlag], ]
Root = CreateXmlElement('%s' % Key, SourceFile.GetSourceFile(), [], AttributeList)
return Root
##
# ModulePropertyXml
#
class ModulePropertyXml(object):
def __init__(self):
self.CommonDefines = CommonDefinesXml()
self.ModuleType = ''
self.Path = ''
self.PcdIsDriver = ''
self.UefiSpecificationVersion = ''
self.PiSpecificationVersion = ''
self.SpecificationList = []
self.SpecificationVersion = ''
self.BootModes = []
self.Events = []
self.HOBs = []
def FromXml(self, Item, Key, Header=None):
self.CommonDefines.FromXml(Item, Key)
self.ModuleType = XmlElement(Item, '%s/ModuleType' % Key)
self.Path = XmlElement(Item, '%s/Path' % Key)
self.PcdIsDriver = XmlElement(Item, '%s/PcdIsDriver' % Key)
self.UefiSpecificationVersion = XmlElement(Item, '%s/UefiSpecificationVersion' % Key)
self.PiSpecificationVersion = XmlElement(Item, '%s/PiSpecificationVersion' % Key)
for SubItem in XmlList(Item, '%s/Specification' % Key):
Specification = XmlElement(SubItem, '/Specification')
Version = XmlAttribute(XmlNode(SubItem, '/Specification'), 'Version')
self.SpecificationList.append((Specification, Version))
for SubItem in XmlList(Item, '%s/BootMode' % Key):
Axml = BootModeXml()
BootMode = Axml.FromXml(SubItem, 'BootMode')
self.BootModes.append(BootMode)
for SubItem in XmlList(Item, '%s/Event' % Key):
Axml = EventXml()
Event = Axml.FromXml(SubItem, 'Event')
self.Events.append(Event)
for SubItem in XmlList(Item, '%s/HOB' % Key):
Axml = HobXml()
Hob = Axml.FromXml(SubItem, 'HOB')
self.HOBs.append(Hob)
        if Header is None:
Header = ModuleObject()
Header.SetModuleType(self.ModuleType)
Header.SetSupArchList(self.CommonDefines.SupArchList)
Header.SetModulePath(self.Path)
Header.SetPcdIsDriver(self.PcdIsDriver)
Header.SetUefiSpecificationVersion(self.UefiSpecificationVersion)
Header.SetPiSpecificationVersion(self.PiSpecificationVersion)
Header.SetSpecList(self.SpecificationList)
return Header, self.BootModes, self.Events, self.HOBs
def ToXml(self, Header, BootModes, Events, Hobs, Key):
if self.ModuleType:
pass
AttributeList = [['SupArchList', GetStringOfList(Header.GetSupArchList())], ]
NodeList = [['ModuleType', Header.GetModuleType()],
['Path', Header.GetModulePath()],
['PcdIsDriver', Header.GetPcdIsDriver()],
['UefiSpecificationVersion', Header.GetUefiSpecificationVersion()],
['PiSpecificationVersion', Header.GetPiSpecificationVersion()],
]
for Item in Header.GetSpecList():
Spec, Version = Item
SpecElem = CreateXmlElement('Specification', Spec, [], [['Version', Version]])
NodeList.append(SpecElem)
for Item in BootModes:
Tmp = BootModeXml()
NodeList.append(Tmp.ToXml(Item, 'BootMode'))
for Item in Events:
Tmp = EventXml()
NodeList.append(Tmp.ToXml(Item, 'Event'))
for Item in Hobs:
Tmp = HobXml()
NodeList.append(Tmp.ToXml(Item, 'HOB'))
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
def __str__(self):
Str = "ModuleType = %s Path = %s PcdIsDriver = %s UefiSpecificationVersion = %s PiSpecificationVersion = %s \
Specification = %s SpecificationVersion = %s %s" % \
(self.ModuleType, self.Path, self.PcdIsDriver, \
self.UefiSpecificationVersion, self.PiSpecificationVersion, \
self.SpecificationList, self.SpecificationVersion, self.CommonDefines)
for Item in self.BootModes:
Str = Str + '\n\t' + str(Item)
for Item in self.Events:
Str = Str + '\n\t' + str(Item)
for Item in self.HOBs:
Str = Str + '\n\t' + str(Item)
return Str
##
# ModuleXml
#
class ModuleSurfaceAreaXml(object):
def __init__(self, Package=''):
self.Module = None
#
# indicate the package that this module resides in
#
self.Package = Package
def FromXml2(self, Item, Module):
if self.Module:
pass
#
# PeiDepex
#
PeiDepexList = []
for SubItem in XmlList(Item, '/ModuleSurfaceArea/PeiDepex'):
Tmp = DepexXml()
Depex = Tmp.FromXml(XmlNode(SubItem, 'PeiDepex'), 'PeiDepex')
PeiDepexList.append(Depex)
Module.SetPeiDepex(PeiDepexList)
#
# DxeDepex
#
DxeDepexList = []
for SubItem in XmlList(Item, '/ModuleSurfaceArea/DxeDepex'):
Tmp = DepexXml()
Depex = Tmp.FromXml(XmlNode(SubItem, 'DxeDepex'), 'DxeDepex')
DxeDepexList.append(Depex)
Module.SetDxeDepex(DxeDepexList)
#
# SmmDepex
#
SmmDepexList = []
for SubItem in XmlList(Item, '/ModuleSurfaceArea/SmmDepex'):
Tmp = DepexXml()
Depex = Tmp.FromXml(XmlNode(SubItem, 'SmmDepex'), 'SmmDepex')
SmmDepexList.append(Depex)
Module.SetSmmDepex(SmmDepexList)
#
# MiscellaneousFile
Tmp = MiscellaneousFileXml()
MiscFileList = Tmp.FromXml(XmlNode(Item, '/ModuleSurfaceArea/MiscellaneousFiles'), 'MiscellaneousFiles')
if MiscFileList:
Module.SetMiscFileList([MiscFileList])
else:
Module.SetMiscFileList([])
#
# UserExtensions
#
for Item in XmlList(Item, '/ModuleSurfaceArea/UserExtensions'):
Tmp = UserExtensionsXml()
UserExtension = Tmp.FromXml(Item, 'UserExtensions')
Module.SetUserExtensionList(Module.GetUserExtensionList() + [UserExtension])
return Module
def FromXml(self, Item, Key, IsStandAlongModule=False):
IsBinaryModule = XmlAttribute(Item, 'BinaryModule')
#
# Header
#
Tmp = HeaderXml()
Module = Tmp.FromXml(XmlNode(Item, '/%s/Header' % Key), 'Header', True, IsStandAlongModule)
Module.SetBinaryModule(IsBinaryModule)
if IsBinaryModule:
GlobalData.gIS_BINARY_INF = True
#
# ModuleProperties
#
Tmp = ModulePropertyXml()
(Module, BootModes, Events, HOBs) = \
Tmp.FromXml(XmlNode(Item, '/ModuleSurfaceArea/ModuleProperties'), 'ModuleProperties', Module)
Module.SetBootModeList(BootModes)
Module.SetEventList(Events)
Module.SetHobList(HOBs)
#
# ClonedFrom
#
Tmp = ClonedFromXml()
ClonedFrom = Tmp.FromXml(XmlNode(Item, '/ModuleSurfaceArea/ClonedFrom'), 'ClonedFrom')
if ClonedFrom:
Module.SetClonedFrom(ClonedFrom)
#
# LibraryClass
#
for SubItem in XmlList(Item, '/ModuleSurfaceArea/LibraryClassDefinitions/LibraryClass'):
Tmp = LibraryClassXml()
LibraryClass = Tmp.FromXml(SubItem, 'LibraryClass')
Module.SetLibraryClassList(Module.GetLibraryClassList() + [LibraryClass])
if XmlList(Item, '/ModuleSurfaceArea/LibraryClassDefinitions') and \
not XmlList(Item, '/ModuleSurfaceArea/LibraryClassDefinitions/LibraryClass'):
Module.SetLibraryClassList([None])
#
# SourceFiles
#
for SubItem in XmlList(Item, '/ModuleSurfaceArea/SourceFiles/Filename'):
Tmp = SourceFileXml()
SourceFile = Tmp.FromXml(SubItem, 'Filename')
Module.SetSourceFileList(Module.GetSourceFileList() + [SourceFile])
if XmlList(Item, '/ModuleSurfaceArea/SourceFiles') and \
not XmlList(Item, '/ModuleSurfaceArea/SourceFiles/Filename') :
Module.SetSourceFileList([None])
#
# BinaryFile
#
for SubItem in XmlList(Item, '/ModuleSurfaceArea/BinaryFiles/BinaryFile'):
Tmp = BinaryFileXml()
BinaryFile = Tmp.FromXml(SubItem, 'BinaryFile')
Module.SetBinaryFileList(Module.GetBinaryFileList() + [BinaryFile])
if XmlList(Item, '/ModuleSurfaceArea/BinaryFiles') and \
not XmlList(Item, '/ModuleSurfaceArea/BinaryFiles/BinaryFile') :
Module.SetBinaryFileList([None])
#
# PackageDependencies
#
for SubItem in XmlList(Item, '/ModuleSurfaceArea/PackageDependencies/Package'):
Tmp = PackageXml()
PackageDependency = Tmp.FromXml(SubItem, 'Package')
Module.SetPackageDependencyList(Module.GetPackageDependencyList() + [PackageDependency])
if XmlList(Item, '/ModuleSurfaceArea/PackageDependencies') and \
not XmlList(Item, '/ModuleSurfaceArea/PackageDependencies/Package'):
Module.SetPackageDependencyList([None])
#
# Guid
#
for SubItem in XmlList(Item, '/ModuleSurfaceArea/Guids/GuidCName'):
Tmp = GuidXml('Module')
GuidProtocolPpi = Tmp.FromXml(SubItem, 'GuidCName')
Module.SetGuidList(Module.GetGuidList() + [GuidProtocolPpi])
if XmlList(Item, '/ModuleSurfaceArea/Guids') and not XmlList(Item, '/ModuleSurfaceArea/Guids/GuidCName'):
Module.SetGuidList([None])
#
# Protocol
#
for SubItem in XmlList(Item, '/ModuleSurfaceArea/Protocols/Protocol'):
Tmp = ProtocolXml('Module')
GuidProtocolPpi = Tmp.FromXml(SubItem, 'Protocol')
Module.SetProtocolList(Module.GetProtocolList() + [GuidProtocolPpi])
if XmlList(Item, '/ModuleSurfaceArea/Protocols') and not XmlList(Item, '/ModuleSurfaceArea/Protocols/Protocol'):
Module.SetProtocolList([None])
#
# Ppi
#
for SubItem in XmlList(Item, '/ModuleSurfaceArea/PPIs/Ppi'):
Tmp = PpiXml('Module')
GuidProtocolPpi = Tmp.FromXml(SubItem, 'Ppi')
Module.SetPpiList(Module.GetPpiList() + [GuidProtocolPpi])
if XmlList(Item, '/ModuleSurfaceArea/PPIs') and not XmlList(Item, '/ModuleSurfaceArea/PPIs/Ppi'):
Module.SetPpiList([None])
#
# Extern
#
for SubItem in XmlList(Item, '/ModuleSurfaceArea/Externs/Extern'):
Tmp = ExternXml()
Extern = Tmp.FromXml(SubItem, 'Extern')
Module.SetExternList(Module.GetExternList() + [Extern])
if XmlList(Item, '/ModuleSurfaceArea/Externs') and not XmlList(Item, '/ModuleSurfaceArea/Externs/Extern'):
Module.SetExternList([None])
if not Module.GetBinaryModule():
#
# PcdCoded
#
for SubItem in XmlList(Item, '/ModuleSurfaceArea/PcdCoded/PcdEntry'):
Tmp = PcdEntryXml()
PcdEntry = Tmp.FromXml3(SubItem, 'PcdEntry')
Module.SetPcdList(Module.GetPcdList() + [PcdEntry])
if XmlList(Item, '/ModuleSurfaceArea/PcdCoded') and \
not XmlList(Item, '/ModuleSurfaceArea/PcdCoded/PcdEntry'):
Module.SetPcdList([None])
Module = self.FromXml2(Item, Module)
#
# return the module object
#
self.Module = Module
return self.Module
def ToXml(self, Module):
if self.Package:
pass
#
# Create root node of module surface area
#
DomModule = minidom.Document().createElement('ModuleSurfaceArea')
if Module.GetBinaryModule():
DomModule.setAttribute('BinaryModule', 'true')
#
# Header
#
Tmp = HeaderXml()
DomModule.appendChild(Tmp.ToXml(Module, 'Header'))
#
# ModuleProperties
#
Tmp = ModulePropertyXml()
DomModule.appendChild(Tmp.ToXml(Module, Module.GetBootModeList(), Module.GetEventList(), Module.GetHobList(), \
'ModuleProperties'))
#
# ClonedFrom
#
Tmp = ClonedFromXml()
if Module.GetClonedFrom():
DomModule.appendChild(Tmp.ToXml(Module.GetClonedFrom(), 'ClonedFrom'))
#
# LibraryClass
#
LibraryClassNode = CreateXmlElement('LibraryClassDefinitions', '', [], [])
for LibraryClass in Module.GetLibraryClassList():
Tmp = LibraryClassXml()
LibraryClassNode.appendChild(Tmp.ToXml2(LibraryClass, 'LibraryClass'))
DomModule.appendChild(LibraryClassNode)
#
# SourceFile
#
SourceFileNode = CreateXmlElement('SourceFiles', '', [], [])
for SourceFile in Module.GetSourceFileList():
Tmp = SourceFileXml()
SourceFileNode.appendChild(Tmp.ToXml(SourceFile, 'Filename'))
DomModule.appendChild(SourceFileNode)
#
# BinaryFile
#
BinaryFileNode = CreateXmlElement('BinaryFiles', '', [], [])
for BinaryFile in Module.GetBinaryFileList():
Tmp = BinaryFileXml()
BinaryFileNode.appendChild(Tmp.ToXml(BinaryFile, 'BinaryFile'))
DomModule.appendChild(BinaryFileNode)
#
# PackageDependencies
#
PackageDependencyNode = CreateXmlElement('PackageDependencies', '', [], [])
for PackageDependency in Module.GetPackageDependencyList():
Tmp = PackageXml()
PackageDependencyNode.appendChild(Tmp.ToXml(PackageDependency, 'Package'))
DomModule.appendChild(PackageDependencyNode)
#
# Guid
#
GuidProtocolPpiNode = CreateXmlElement('Guids', '', [], [])
for GuidProtocolPpi in Module.GetGuidList():
Tmp = GuidXml('Module')
GuidProtocolPpiNode.appendChild(Tmp.ToXml(GuidProtocolPpi, 'GuidCName'))
DomModule.appendChild(GuidProtocolPpiNode)
#
# Protocol
#
GuidProtocolPpiNode = CreateXmlElement('Protocols', '', [], [])
for GuidProtocolPpi in Module.GetProtocolList():
Tmp = ProtocolXml('Module')
GuidProtocolPpiNode.appendChild(Tmp.ToXml(GuidProtocolPpi, 'Protocol'))
DomModule.appendChild(GuidProtocolPpiNode)
#
# Ppi
#
GuidProtocolPpiNode = CreateXmlElement('PPIs', '', [], [])
for GuidProtocolPpi in Module.GetPpiList():
Tmp = PpiXml('Module')
GuidProtocolPpiNode.appendChild(Tmp.ToXml(GuidProtocolPpi, 'Ppi'))
DomModule.appendChild(GuidProtocolPpiNode)
#
# Extern
#
ExternNode = CreateXmlElement('Externs', '', [], [])
for Extern in Module.GetExternList():
Tmp = ExternXml()
ExternNode.appendChild(Tmp.ToXml(Extern, 'Extern'))
DomModule.appendChild(ExternNode)
#
# PcdCoded
#
PcdEntryNode = CreateXmlElement('PcdCoded', '', [], [])
for PcdEntry in Module.GetPcdList():
Tmp = PcdEntryXml()
PcdEntryNode.appendChild(Tmp.ToXml3(PcdEntry, 'PcdEntry'))
DomModule.appendChild(PcdEntryNode)
#
# PeiDepex
#
if Module.GetPeiDepex():
for Item in Module.GetPeiDepex():
Tmp = DepexXml()
DomModule.appendChild(Tmp.ToXml(Item, 'PeiDepex'))
#
# DxeDepex
#
if Module.GetDxeDepex():
for Item in Module.GetDxeDepex():
Tmp = DepexXml()
DomModule.appendChild(Tmp.ToXml(Item, 'DxeDepex'))
#
# SmmDepex
#
if Module.GetSmmDepex():
for Item in Module.GetSmmDepex():
Tmp = DepexXml()
DomModule.appendChild(Tmp.ToXml(Item, 'SmmDepex'))
#
# MiscellaneousFile
#
if Module.GetMiscFileList():
Tmp = MiscellaneousFileXml()
DomModule.appendChild(Tmp.ToXml(Module.GetMiscFileList()[0], 'MiscellaneousFiles'))
#
# UserExtensions
#
if Module.GetUserExtensionList():
for UserExtension in Module.GetUserExtensionList():
Tmp = UserExtensionsXml()
DomModule.appendChild(Tmp.ToXml(UserExtension, 'UserExtensions'))
return DomModule
##
# BuildFlagXml used to generate BuildFlag for <AsBuilt>
#
class BuildFlagXml(object):
def __init__(self):
self.Target = ''
self.TagName = ''
self.Family = ''
self.AsBuiltFlags = ''
def FromXml(self, Item, Key):
self.Target = XmlElement(Item, '%s/Target' % Key)
self.TagName = XmlElement(Item, '%s/TagName' % Key)
self.Family = XmlElement(Item, '%s/Family' % Key)
BuildFlag = BinaryBuildFlagObject()
BuildFlag.SetTarget(self.Target)
BuildFlag.SetTagName(self.TagName)
BuildFlag.SetFamily(self.Family)
return BuildFlag
#
# For AsBuild INF usage
#
def FromXml2(self, Item, Key):
self.AsBuiltFlags = XmlElement(Item, '%s' % Key)
LineList = GetSplitValueList(self.AsBuiltFlags, '\n')
ReturnLine = ''
Count = 0
for Line in LineList:
if Count == 0:
ReturnLine = "# " + Line
else:
ReturnLine = ReturnLine + '\n' + '# ' + Line
Count += 1
BuildFlag = BinaryBuildFlagObject()
BuildFlag.SetAsBuiltOptionFlags(ReturnLine)
return BuildFlag
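    # For example (illustrative flag text): an <AsBuilt> BuildFlags body of
    # "MSFT:*_*_*_CC_FLAGS = /W4" becomes "# MSFT:*_*_*_CC_FLAGS = /W4",
    # i.e. each line is prefixed with "# " so it can be emitted as comment
    # lines in a generated binary INF.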
def ToXml(self, BuildFlag, Key):
if self.Target:
pass
AttributeList = []
NodeList = []
NodeList.append(['BuildFlags', BuildFlag])
Root = CreateXmlElement('%s' % Key, '', NodeList, AttributeList)
return Root
| bsd-2-clause | 1,149,584,609,138,463,200 | 35.645709 | 120 | 0.589141 | false |
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/structured/labs/serving/application/lib/pyasn1_modules/rfc5990.py | 13 | 5505 | #
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# Use of the RSA-KEM Key Transport Algorithm in the CMS
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc5990.txt
#
from pyasn1.type import constraint
from pyasn1.type import namedtype
from pyasn1.type import univ
from pyasn1_modules import rfc5280
MAX = float('inf')
def _OID(*components):
output = []
for x in tuple(components):
if isinstance(x, univ.ObjectIdentifier):
output.extend(list(x))
else:
output.append(int(x))
return univ.ObjectIdentifier(output)
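# Illustrative example (not part of the original module): _OID flattens any
# mix of ObjectIdentifier prefixes and plain integers into a single OID, e.g.
#
#   >>> str(_OID(univ.ObjectIdentifier('1.0.18033.2'), 2, 4))
#   '1.0.18033.2.2.4'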
# Imports from RFC 5280
AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
# Useful types and definitions
class NullParms(univ.Null):
pass
# Object identifier arcs
is18033_2 = _OID(1, 0, 18033, 2)
nistAlgorithm = _OID(2, 16, 840, 1, 101, 3, 4)
pkcs_1 = _OID(1, 2, 840, 113549, 1, 1)
x9_44 = _OID(1, 3, 133, 16, 840, 9, 44)
x9_44_components = _OID(x9_44, 1)
# Types for algorithm identifiers
class Camellia_KeyWrappingScheme(AlgorithmIdentifier):
pass
class DataEncapsulationMechanism(AlgorithmIdentifier):
pass
class KDF2_HashFunction(AlgorithmIdentifier):
pass
class KDF3_HashFunction(AlgorithmIdentifier):
pass
class KeyDerivationFunction(AlgorithmIdentifier):
pass
class KeyEncapsulationMechanism(AlgorithmIdentifier):
pass
class X9_SymmetricKeyWrappingScheme(AlgorithmIdentifier):
pass
# RSA-KEM Key Transport Algorithm
id_rsa_kem = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 14)
class GenericHybridParameters(univ.Sequence):
pass
GenericHybridParameters.componentType = namedtype.NamedTypes(
namedtype.NamedType('kem', KeyEncapsulationMechanism()),
namedtype.NamedType('dem', DataEncapsulationMechanism())
)
rsa_kem = AlgorithmIdentifier()
rsa_kem['algorithm'] = id_rsa_kem
rsa_kem['parameters'] = GenericHybridParameters()
# KEM-RSA Key Encapsulation Mechanism
id_kem_rsa = _OID(is18033_2, 2, 4)
class KeyLength(univ.Integer):
pass
KeyLength.subtypeSpec = constraint.ValueRangeConstraint(1, MAX)
class RsaKemParameters(univ.Sequence):
pass
RsaKemParameters.componentType = namedtype.NamedTypes(
namedtype.NamedType('keyDerivationFunction', KeyDerivationFunction()),
namedtype.NamedType('keyLength', KeyLength())
)
kem_rsa = AlgorithmIdentifier()
kem_rsa['algorithm'] = id_kem_rsa
kem_rsa['parameters'] = RsaKemParameters()
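# Illustrative sketch (not part of the original module; the field values are
# example assumptions): a populated KEM-RSA parameter block pairs a KDF with
# a key length, e.g. KDF3 (defined below) and a 16-octet key:
#
#   params = RsaKemParameters()
#   params['keyDerivationFunction'] = kdf3
#   params['keyLength'] = KeyLength(16)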
# Key Derivation Functions
id_kdf_kdf2 = _OID(x9_44_components, 1)
id_kdf_kdf3 = _OID(x9_44_components, 2)
kdf2 = AlgorithmIdentifier()
kdf2['algorithm'] = id_kdf_kdf2
kdf2['parameters'] = KDF2_HashFunction()
kdf3 = AlgorithmIdentifier()
kdf3['algorithm'] = id_kdf_kdf3
kdf3['parameters'] = KDF3_HashFunction()
# Hash Functions
id_sha1 = _OID(1, 3, 14, 3, 2, 26)
id_sha224 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 4)
id_sha256 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 1)
id_sha384 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 2)
id_sha512 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 3)
sha1 = AlgorithmIdentifier()
sha1['algorithm'] = id_sha1
sha1['parameters'] = univ.Null("")
sha224 = AlgorithmIdentifier()
sha224['algorithm'] = id_sha224
sha224['parameters'] = univ.Null("")
sha256 = AlgorithmIdentifier()
sha256['algorithm'] = id_sha256
sha256['parameters'] = univ.Null("")
sha384 = AlgorithmIdentifier()
sha384['algorithm'] = id_sha384
sha384['parameters'] = univ.Null("")
sha512 = AlgorithmIdentifier()
sha512['algorithm'] = id_sha512
sha512['parameters'] = univ.Null("")
# Symmetric Key-Wrapping Schemes
id_aes128_Wrap = _OID(nistAlgorithm, 1, 5)
id_aes192_Wrap = _OID(nistAlgorithm, 1, 25)
id_aes256_Wrap = _OID(nistAlgorithm, 1, 45)
id_alg_CMS3DESwrap = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 6)
id_camellia128_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 2)
id_camellia192_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 3)
id_camellia256_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 4)
aes128_Wrap = AlgorithmIdentifier()
aes128_Wrap['algorithm'] = id_aes128_Wrap
# aes128_Wrap['parameters'] are absent
aes192_Wrap = AlgorithmIdentifier()
aes192_Wrap['algorithm'] = id_aes192_Wrap

# aes192_Wrap['parameters'] are absent
aes256_Wrap = AlgorithmIdentifier()
aes256_Wrap['algorithm'] = id_aes256_Wrap
# aes256_Wrap['parameters'] are absent
tdes_Wrap = AlgorithmIdentifier()
tdes_Wrap['algorithm'] = id_alg_CMS3DESwrap
tdes_Wrap['parameters'] = univ.Null("")
camellia128_Wrap = AlgorithmIdentifier()
camellia128_Wrap['algorithm'] = id_camellia128_Wrap
# camellia128_Wrap['parameters'] are absent
camellia192_Wrap = AlgorithmIdentifier()
camellia192_Wrap['algorithm'] = id_camellia192_Wrap
# camellia192_Wrap['parameters'] are absent
camellia256_Wrap = AlgorithmIdentifier()
camellia256_Wrap['algorithm'] = id_camellia256_Wrap
# camellia256_Wrap['parameters'] are absent
# Update the Algorithm Identifier map in rfc5280.py.
# Note that the ones that must not have parameters are not added to the map.
_algorithmIdentifierMapUpdate = {
id_rsa_kem: GenericHybridParameters(),
id_kem_rsa: RsaKemParameters(),
id_kdf_kdf2: KDF2_HashFunction(),
id_kdf_kdf3: KDF3_HashFunction(),
id_sha1: univ.Null(),
id_sha224: univ.Null(),
id_sha256: univ.Null(),
id_sha384: univ.Null(),
id_sha512: univ.Null(),
id_alg_CMS3DESwrap: univ.Null(),
}
rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
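# Illustrative usage (not part of the original module; assumes pyasn1's DER
# decoder): with the map updated above, decoding a structure that carries an
# AlgorithmIdentifier (e.g. an rfc5280.Certificate) will also decode the
# 'parameters' of the algorithms registered here instead of leaving them as
# opaque ANY content:
#
#   >>> from pyasn1.codec.der import decoder
#   >>> cert, rest = decoder.decode(substrate, asn1Spec=rfc5280.Certificate())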
| apache-2.0 | -8,169,353,605,028,463,000 | 22.227848 | 76 | 0.711898 | false |
phrawzty/pelican-plugins | liquid_tags/mdx_liquid_tags.py | 11 | 3236 | """
Markdown Extension for Liquid-style Tags
----------------------------------------
A markdown extension to allow user-defined tags of the form::
{% tag arg1 arg2 ... argn %}
Where "tag" is associated with some user-defined extension.
These result in a preprocess step within markdown that produces
either markdown or html.
"""
import warnings
import markdown
import itertools
import re
import os
from functools import wraps
# Define some regular expressions
LIQUID_TAG = re.compile(r'\{%.*?%\}', re.MULTILINE | re.DOTALL)
EXTRACT_TAG = re.compile(r'(?:\s*)(\S+)(?:\s*)')
LT_CONFIG = { 'CODE_DIR': 'code',
'NOTEBOOK_DIR': 'notebooks'
}
LT_HELP = { 'CODE_DIR' : 'Code directory for include_code subplugin',
'NOTEBOOK_DIR' : 'Notebook directory for notebook subplugin'
}
class _LiquidTagsPreprocessor(markdown.preprocessors.Preprocessor):
_tags = {}
def __init__(self, configs):
self.configs = configs
def run(self, lines):
page = '\n'.join(lines)
liquid_tags = LIQUID_TAG.findall(page)
for i, markup in enumerate(liquid_tags):
# remove {% %}
markup = markup[2:-2]
tag = EXTRACT_TAG.match(markup).groups()[0]
markup = EXTRACT_TAG.sub('', markup, 1)
if tag in self._tags:
liquid_tags[i] = self._tags[tag](self, tag, markup.strip())
# add an empty string to liquid_tags so that chaining works
liquid_tags.append('')
# reconstruct string
page = ''.join(itertools.chain(*zip(LIQUID_TAG.split(page),
liquid_tags)))
# resplit the lines
return page.split("\n")
class LiquidTags(markdown.Extension):
"""Wrapper for MDPreprocessor"""
def __init__(self, config):
try:
# Needed for markdown versions >= 2.5
for key,value in LT_CONFIG.items():
self.config[key] = [value,LT_HELP[key]]
super(LiquidTags,self).__init__(**config)
except AttributeError:
# Markdown versions < 2.5
for key,value in LT_CONFIG.items():
config[key] = [config[key],LT_HELP[key]]
super(LiquidTags,self).__init__(config)
@classmethod
def register(cls, tag):
"""Decorator to register a new include tag"""
def dec(func):
if tag in _LiquidTagsPreprocessor._tags:
warnings.warn("Enhanced Markdown: overriding tag '%s'" % tag)
_LiquidTagsPreprocessor._tags[tag] = func
return func
return dec
def extendMarkdown(self, md, md_globals):
self.htmlStash = md.htmlStash
md.registerExtension(self)
# for the include_code preprocessor, we need to re-run the
# fenced code block preprocessor after substituting the code.
# Because the fenced code processor is run before, {% %} tags
# within equations will not be parsed as an include.
md.preprocessors.add('mdincludes',
_LiquidTagsPreprocessor(self), ">html_block")
def makeExtension(configs=None):
"""Wrapper for a MarkDown extension"""
return LiquidTags(configs=configs)
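# Illustrative usage (not part of the original module): a subplugin registers
# a tag with the `register` decorator; the callable receives the preprocessor
# instance, the tag name and the raw markup, and returns markdown or html, as
# in the `self._tags[tag](self, tag, markup.strip())` call above.
#
#   @LiquidTags.register('literal')
#   def literal(preprocessor, tag, markup):
#       return '<code>%s</code>' % markup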
| agpl-3.0 | 3,961,652,124,342,269,400 | 33.425532 | 77 | 0.596106 | false |
sserrot/champion_relationships | venv/Lib/site-packages/jinja2/nodes.py | 7 | 31095 | # -*- coding: utf-8 -*-
"""AST nodes generated by the parser for the compiler. Also provides
some node tree helper functions used by the parser and compiler in order
to normalize nodes.
"""
import operator
from collections import deque
from markupsafe import Markup
from ._compat import izip
from ._compat import PY2
from ._compat import text_type
from ._compat import with_metaclass
_binop_to_func = {
"*": operator.mul,
"/": operator.truediv,
"//": operator.floordiv,
"**": operator.pow,
"%": operator.mod,
"+": operator.add,
"-": operator.sub,
}
_uaop_to_func = {"not": operator.not_, "+": operator.pos, "-": operator.neg}
_cmpop_to_func = {
"eq": operator.eq,
"ne": operator.ne,
"gt": operator.gt,
"gteq": operator.ge,
"lt": operator.lt,
"lteq": operator.le,
"in": lambda a, b: a in b,
"notin": lambda a, b: a not in b,
}
class Impossible(Exception):
"""Raised if the node could not perform a requested action."""
class NodeType(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(mcs, name, bases, d):
for attr in "fields", "attributes":
storage = []
storage.extend(getattr(bases[0], attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) == 1, "multiple inheritance not allowed"
assert len(storage) == len(set(storage)), "layout conflict"
d[attr] = tuple(storage)
d.setdefault("abstract", False)
return type.__new__(mcs, name, bases, d)
class EvalContext(object):
"""Holds evaluation time information. Custom attributes can be attached
to it in extensions.
"""
def __init__(self, environment, template_name=None):
self.environment = environment
if callable(environment.autoescape):
self.autoescape = environment.autoescape(template_name)
else:
self.autoescape = environment.autoescape
self.volatile = False
def save(self):
return self.__dict__.copy()
def revert(self, old):
self.__dict__.clear()
self.__dict__.update(old)
def get_eval_context(node, ctx):
if ctx is None:
if node.environment is None:
raise RuntimeError(
"if no eval context is passed, the "
"node must have an attached "
"environment."
)
return EvalContext(node.environment)
return ctx
class Node(with_metaclass(NodeType, object)):
"""Baseclass for all Jinja nodes. There are a number of nodes available
of different types. There are four major types:
- :class:`Stmt`: statements
- :class:`Expr`: expressions
- :class:`Helper`: helper nodes
- :class:`Template`: the outermost wrapper node
All nodes have fields and attributes. Fields may be other nodes, lists,
or arbitrary values. Fields are passed to the constructor as regular
positional arguments, attributes as keyword arguments. Each node has
two attributes: `lineno` (the line number of the node) and `environment`.
The `environment` attribute is set at the end of the parsing process for
all nodes automatically.
"""
fields = ()
attributes = ("lineno", "environment")
abstract = True
def __init__(self, *fields, **attributes):
if self.abstract:
raise TypeError("abstract nodes are not instantiable")
if fields:
if len(fields) != len(self.fields):
if not self.fields:
raise TypeError("%r takes 0 arguments" % self.__class__.__name__)
raise TypeError(
"%r takes 0 or %d argument%s"
% (
self.__class__.__name__,
len(self.fields),
len(self.fields) != 1 and "s" or "",
)
)
for name, arg in izip(self.fields, fields):
setattr(self, name, arg)
for attr in self.attributes:
setattr(self, attr, attributes.pop(attr, None))
if attributes:
raise TypeError("unknown attribute %r" % next(iter(attributes)))
def iter_fields(self, exclude=None, only=None):
"""This method iterates over all fields that are defined and yields
``(key, value)`` tuples. Per default all fields are returned, but
it's possible to limit that to some fields by providing the `only`
parameter or to exclude some using the `exclude` parameter. Both
should be sets or tuples of field names.
"""
for name in self.fields:
if (
(exclude is only is None)
or (exclude is not None and name not in exclude)
or (only is not None and name in only)
):
try:
yield name, getattr(self, name)
except AttributeError:
pass
def iter_child_nodes(self, exclude=None, only=None):
"""Iterates over all direct child nodes of the node. This iterates
        over all fields and yields the values if they are nodes. If the value
of a field is a list all the nodes in that list are returned.
"""
for _, item in self.iter_fields(exclude, only):
if isinstance(item, list):
for n in item:
if isinstance(n, Node):
yield n
elif isinstance(item, Node):
yield item
def find(self, node_type):
"""Find the first node of a given type. If no such node exists the
return value is `None`.
"""
for result in self.find_all(node_type):
return result
def find_all(self, node_type):
"""Find all the nodes of a given type. If the type is a tuple,
the check is performed for any of the tuple items.
"""
for child in self.iter_child_nodes():
if isinstance(child, node_type):
yield child
for result in child.find_all(node_type):
yield result
def set_ctx(self, ctx):
"""Reset the context of a node and all child nodes. Per default the
        parser will generate all nodes with a 'load' context, as it's the
most common one. This method is used in the parser to set assignment
targets and other nodes to a store context.
"""
todo = deque([self])
while todo:
node = todo.popleft()
if "ctx" in node.fields:
node.ctx = ctx
todo.extend(node.iter_child_nodes())
return self
def set_lineno(self, lineno, override=False):
"""Set the line numbers of the node and children."""
todo = deque([self])
while todo:
node = todo.popleft()
if "lineno" in node.attributes:
if node.lineno is None or override:
node.lineno = lineno
todo.extend(node.iter_child_nodes())
return self
def set_environment(self, environment):
"""Set the environment for all nodes."""
todo = deque([self])
while todo:
node = todo.popleft()
node.environment = environment
todo.extend(node.iter_child_nodes())
return self
def __eq__(self, other):
return type(self) is type(other) and tuple(self.iter_fields()) == tuple(
other.iter_fields()
)
def __ne__(self, other):
return not self.__eq__(other)
# Restore Python 2 hashing behavior on Python 3
__hash__ = object.__hash__
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
", ".join("%s=%r" % (arg, getattr(self, arg, None)) for arg in self.fields),
)
def dump(self):
def _dump(node):
if not isinstance(node, Node):
buf.append(repr(node))
return
buf.append("nodes.%s(" % node.__class__.__name__)
if not node.fields:
buf.append(")")
return
for idx, field in enumerate(node.fields):
if idx:
buf.append(", ")
value = getattr(node, field)
if isinstance(value, list):
buf.append("[")
for idx, item in enumerate(value):
if idx:
buf.append(", ")
_dump(item)
buf.append("]")
else:
_dump(value)
buf.append(")")
buf = []
_dump(self)
return "".join(buf)
class Stmt(Node):
"""Base node for all statements."""
abstract = True
class Helper(Node):
"""Nodes that exist in a specific context only."""
abstract = True
class Template(Node):
"""Node that represents a template. This must be the outermost node that
is passed to the compiler.
"""
fields = ("body",)
class Output(Stmt):
"""A node that holds multiple expressions which are then printed out.
This is used both for the `print` statement and the regular template data.
"""
fields = ("nodes",)
class Extends(Stmt):
"""Represents an extends statement."""
fields = ("template",)
class For(Stmt):
"""The for loop. `target` is the target for the iteration (usually a
:class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
of nodes that are used as loop-body, and `else_` a list of nodes for the
`else` block. If no else node exists it has to be an empty list.
For filtered nodes an expression can be stored as `test`, otherwise `None`.
"""
fields = ("target", "iter", "body", "else_", "test", "recursive")
class If(Stmt):
"""If `test` is true, `body` is rendered, else `else_`."""
fields = ("test", "body", "elif_", "else_")
class Macro(Stmt):
"""A macro definition. `name` is the name of the macro, `args` a list of
arguments and `defaults` a list of defaults if there are any. `body` is
a list of nodes for the macro body.
"""
fields = ("name", "args", "defaults", "body")
class CallBlock(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ("call", "args", "defaults", "body")
class FilterBlock(Stmt):
"""Node for filter sections."""
fields = ("body", "filter")
class With(Stmt):
"""Specific node for with statements. In older versions of Jinja the
with statement was implemented on the base of the `Scope` node instead.
.. versionadded:: 2.9.3
"""
fields = ("targets", "values", "body")
class Block(Stmt):
"""A node that represents a block."""
fields = ("name", "body", "scoped")
class Include(Stmt):
"""A node that represents the include tag."""
fields = ("template", "with_context", "ignore_missing")
class Import(Stmt):
"""A node that represents the import tag."""
fields = ("template", "target", "with_context")
class FromImport(Stmt):
"""A node that represents the from import tag. It's important to not
pass unsafe names to the name attribute. The compiler translates the
attribute lookups directly into getattr calls and does *not* use the
subscript callback of the interface. As exported variables may not
start with double underscores (which the parser asserts) this is not a
problem for regular Jinja code, but if this node is used in an extension
extra care must be taken.
The list of names may contain tuples if aliases are wanted.
"""
fields = ("template", "names", "with_context")
class ExprStmt(Stmt):
"""A statement that evaluates an expression and discards the result."""
fields = ("node",)
class Assign(Stmt):
"""Assigns an expression to a target."""
fields = ("target", "node")
class AssignBlock(Stmt):
"""Assigns a block to a target."""
fields = ("target", "filter", "body")
class Expr(Node):
"""Baseclass for all expressions."""
abstract = True
def as_const(self, eval_ctx=None):
"""Return the value of the expression as constant or raise
:exc:`Impossible` if this was not possible.
An :class:`EvalContext` can be provided, if none is given
a default context is created which requires the nodes to have
an attached environment.
.. versionchanged:: 2.4
the `eval_ctx` parameter was added.
"""
raise Impossible()
def can_assign(self):
"""Check if it's possible to assign something to this node."""
return False
class BinExpr(Expr):
"""Baseclass for all binary expressions."""
fields = ("left", "right")
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if (
self.environment.sandboxed
and self.operator in self.environment.intercepted_binops
):
raise Impossible()
f = _binop_to_func[self.operator]
try:
return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
except Exception:
raise Impossible()
class UnaryExpr(Expr):
"""Baseclass for all unary expressions."""
fields = ("node",)
operator = None
abstract = True
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
# intercepted operators cannot be folded at compile time
if (
self.environment.sandboxed
and self.operator in self.environment.intercepted_unops
):
raise Impossible()
f = _uaop_to_func[self.operator]
try:
return f(self.node.as_const(eval_ctx))
except Exception:
raise Impossible()
class Name(Expr):
"""Looks up a name or stores a value in a name.
The `ctx` of the node can be one of the following values:
- `store`: store a value in the name
- `load`: load that name
- `param`: like `store` but if the name was defined as function parameter.
"""
fields = ("name", "ctx")
def can_assign(self):
return self.name not in ("true", "false", "none", "True", "False", "None")
class NSRef(Expr):
"""Reference to a namespace value assignment"""
fields = ("name", "attr")
def can_assign(self):
# We don't need any special checks here; NSRef assignments have a
# runtime check to ensure the target is a namespace object which will
# have been checked already as it is created using a normal assignment
# which goes through a `Name` node.
return True
class Literal(Expr):
"""Baseclass for literals."""
abstract = True
class Const(Literal):
"""All constant values. The parser will return this node for simple
constants such as ``42`` or ``"foo"`` but it can be used to store more
    complex values such as lists too. Only constants with a safe
    representation (objects where ``eval(repr(x)) == x`` is true) can be
    stored.
"""
fields = ("value",)
def as_const(self, eval_ctx=None):
rv = self.value
if (
PY2
and type(rv) is text_type
and self.environment.policies["compiler.ascii_str"]
):
try:
rv = rv.encode("ascii")
except UnicodeError:
pass
return rv
@classmethod
def from_untrusted(cls, value, lineno=None, environment=None):
"""Return a const object if the value is representable as
constant value in the generated code, otherwise it will raise
an `Impossible` exception.
"""
from .compiler import has_safe_repr
if not has_safe_repr(value):
raise Impossible()
return cls(value, lineno=lineno, environment=environment)
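# Illustrative example (not part of the original module): Const folds straight
# to its value, while from_untrusted rejects anything without a safe repr:
#
#   >>> Const(42).as_const()
#   42
#   >>> Const.from_untrusted(object())   # raises Impossible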
class TemplateData(Literal):
"""A constant template string."""
fields = ("data",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
if eval_ctx.autoescape:
return Markup(self.data)
return self.data
class Tuple(Literal):
"""For loop unpacking and some other things like multiple arguments
for subscripts. Like for :class:`Name` `ctx` specifies if the tuple
is used for loading the names or storing.
"""
fields = ("items", "ctx")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return tuple(x.as_const(eval_ctx) for x in self.items)
def can_assign(self):
for item in self.items:
if not item.can_assign():
return False
return True
class List(Literal):
"""Any list literal such as ``[1, 2, 3]``"""
fields = ("items",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return [x.as_const(eval_ctx) for x in self.items]
class Dict(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ("items",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
class Pair(Helper):
"""A key, value pair for dicts."""
fields = ("key", "value")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
class Keyword(Helper):
"""A key, value pair for keyword arguments where key is a string."""
fields = ("key", "value")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.key, self.value.as_const(eval_ctx)
class CondExpr(Expr):
"""A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ("test", "expr1", "expr2")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.test.as_const(eval_ctx):
return self.expr1.as_const(eval_ctx)
# if we evaluate to an undefined object, we better do that at runtime
if self.expr2 is None:
raise Impossible()
return self.expr2.as_const(eval_ctx)
def args_as_const(node, eval_ctx):
args = [x.as_const(eval_ctx) for x in node.args]
kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs)
if node.dyn_args is not None:
try:
args.extend(node.dyn_args.as_const(eval_ctx))
except Exception:
raise Impossible()
if node.dyn_kwargs is not None:
try:
kwargs.update(node.dyn_kwargs.as_const(eval_ctx))
except Exception:
raise Impossible()
return args, kwargs
class Filter(Expr):
"""This node applies a filter on an expression. `name` is the name of
the filter, the rest of the fields are the same as for :class:`Call`.
If the `node` of a filter is `None` the contents of the last buffer are
filtered. Buffers are created by macros and filter blocks.
"""
fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile or self.node is None:
raise Impossible()
# we have to be careful here because we call filter_ below.
# if this variable would be called filter, 2to3 would wrap the
# call in a list because it is assuming we are talking about the
# builtin filter function here which no longer returns a list in
# python 3. because of that, do not rename filter_ to filter!
filter_ = self.environment.filters.get(self.name)
if filter_ is None or getattr(filter_, "contextfilter", False) is True:
raise Impossible()
# We cannot constant handle async filters, so we need to make sure
# to not go down this path.
if eval_ctx.environment.is_async and getattr(
filter_, "asyncfiltervariant", False
):
raise Impossible()
args, kwargs = args_as_const(self, eval_ctx)
args.insert(0, self.node.as_const(eval_ctx))
if getattr(filter_, "evalcontextfilter", False) is True:
args.insert(0, eval_ctx)
elif getattr(filter_, "environmentfilter", False) is True:
args.insert(0, self.environment)
try:
return filter_(*args, **kwargs)
except Exception:
raise Impossible()
class Test(Expr):
"""Applies a test on an expression. `name` is the name of the test, the
rest of the fields are the same as for :class:`Call`.
"""
fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
def as_const(self, eval_ctx=None):
test = self.environment.tests.get(self.name)
if test is None:
raise Impossible()
eval_ctx = get_eval_context(self, eval_ctx)
args, kwargs = args_as_const(self, eval_ctx)
args.insert(0, self.node.as_const(eval_ctx))
try:
return test(*args, **kwargs)
except Exception:
raise Impossible()
class Call(Expr):
"""Calls an expression. `args` is a list of arguments, `kwargs` a list
of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
and `dyn_kwargs` has to be either `None` or a node that is used as
node for dynamic positional (``*args``) or keyword (``**kwargs``)
arguments.
"""
fields = ("node", "args", "kwargs", "dyn_args", "dyn_kwargs")
class Getitem(Expr):
"""Get an attribute or item from an expression and prefer the item."""
fields = ("node", "arg", "ctx")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if self.ctx != "load":
raise Impossible()
try:
return self.environment.getitem(
self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx)
)
except Exception:
raise Impossible()
def can_assign(self):
return False
class Getattr(Expr):
"""Get an attribute or item from an expression that is a ascii-only
bytestring and prefer the attribute.
"""
fields = ("node", "attr", "ctx")
def as_const(self, eval_ctx=None):
if self.ctx != "load":
raise Impossible()
try:
eval_ctx = get_eval_context(self, eval_ctx)
return self.environment.getattr(self.node.as_const(eval_ctx), self.attr)
except Exception:
raise Impossible()
def can_assign(self):
return False
class Slice(Expr):
"""Represents a slice object. This must only be used as argument for
:class:`Subscript`.
"""
fields = ("start", "stop", "step")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
def const(obj):
if obj is None:
return None
return obj.as_const(eval_ctx)
return slice(const(self.start), const(self.stop), const(self.step))
class Concat(Expr):
"""Concatenates the list of expressions provided after converting them to
unicode.
"""
fields = ("nodes",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return "".join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
class Compare(Expr):
"""Compares an expression with some other expressions. `ops` must be a
list of :class:`Operand`\\s.
"""
fields = ("expr", "ops")
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
result = value = self.expr.as_const(eval_ctx)
try:
for op in self.ops:
new_value = op.expr.as_const(eval_ctx)
result = _cmpop_to_func[op.op](value, new_value)
if not result:
return False
value = new_value
except Exception:
raise Impossible()
return result
class Operand(Helper):
"""Holds an operator and an expression."""
fields = ("op", "expr")
if __debug__:
Operand.__doc__ += "\nThe following operators are available: " + ", ".join(
sorted(
"``%s``" % x
for x in set(_binop_to_func) | set(_uaop_to_func) | set(_cmpop_to_func)
)
)
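# Illustrative example (not part of the original module): Compare folds
# chained comparisons like Python does, so ``1 < 2 < 3`` evaluates to True:
#
#   >>> env_ctx = EvalContext(Environment())   # jinja2.Environment, assumed
#   >>> Compare(Const(1), [Operand('lt', Const(2)),
#   ...                    Operand('lt', Const(3))]).as_const(env_ctx)
#   True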
class Mul(BinExpr):
"""Multiplies the left with the right node."""
operator = "*"
class Div(BinExpr):
"""Divides the left by the right node."""
operator = "/"
class FloorDiv(BinExpr):
"""Divides the left by the right node and truncates conver the
result into an integer by truncating.
"""
operator = "//"
class Add(BinExpr):
"""Add the left to the right node."""
operator = "+"
class Sub(BinExpr):
"""Subtract the right from the left node."""
operator = "-"
class Mod(BinExpr):
"""Left modulo right."""
operator = "%"
class Pow(BinExpr):
"""Left to the power of right."""
operator = "**"
class And(BinExpr):
"""Short circuited AND."""
operator = "and"
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
class Or(BinExpr):
"""Short circuited OR."""
operator = "or"
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
class Not(UnaryExpr):
"""Negate the expression."""
operator = "not"
class Neg(UnaryExpr):
"""Make the expression negative."""
operator = "-"
class Pos(UnaryExpr):
"""Make the expression positive (noop for most expressions)"""
operator = "+"
# Helpers for extensions
class EnvironmentAttribute(Expr):
"""Loads an attribute from the environment object. This is useful for
extensions that want to call a callback stored on the environment.
"""
fields = ("name",)
class ExtensionAttribute(Expr):
"""Returns the attribute of an extension bound to the environment.
The identifier is the identifier of the :class:`Extension`.
This node is usually constructed by calling the
:meth:`~jinja2.ext.Extension.attr` method on an extension.
"""
fields = ("identifier", "name")
class ImportedName(Expr):
"""If created with an import name the import name is returned on node
access. For example ``ImportedName('cgi.escape')`` returns the `escape`
function from the cgi module on evaluation. Imports are optimized by the
compiler so there is no need to assign them to local variables.
"""
fields = ("importname",)
class InternalName(Expr):
"""An internal name in the compiler. You cannot create these nodes
yourself but the parser provides a
:meth:`~jinja2.parser.Parser.free_identifier` method that creates
a new identifier for you. This identifier is not available from the
    template and is not treated specially by the compiler.
"""
fields = ("name",)
def __init__(self):
raise TypeError(
"Can't create internal names. Use the "
"`free_identifier` method on a parser."
)
class MarkSafe(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`)."""
fields = ("expr",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
return Markup(self.expr.as_const(eval_ctx))
class MarkSafeIfAutoescape(Expr):
"""Mark the wrapped expression as safe (wrap it as `Markup`) but
only if autoescaping is active.
.. versionadded:: 2.5
"""
fields = ("expr",)
def as_const(self, eval_ctx=None):
eval_ctx = get_eval_context(self, eval_ctx)
if eval_ctx.volatile:
raise Impossible()
expr = self.expr.as_const(eval_ctx)
if eval_ctx.autoescape:
return Markup(expr)
return expr
class ContextReference(Expr):
"""Returns the current template context. It can be used like a
:class:`Name` node, with a ``'load'`` ctx and will return the
current :class:`~jinja2.runtime.Context` object.
Here an example that assigns the current template name to a
variable named `foo`::
Assign(Name('foo', ctx='store'),
Getattr(ContextReference(), 'name'))
This is basically equivalent to using the
:func:`~jinja2.contextfunction` decorator when using the
high-level API, which causes a reference to the context to be passed
as the first argument to a function.
"""
class DerivedContextReference(Expr):
"""Return the current template context including locals. Behaves
exactly like :class:`ContextReference`, but includes local
variables, such as from a ``for`` loop.
.. versionadded:: 2.11
"""
class Continue(Stmt):
"""Continue a loop."""
class Break(Stmt):
"""Break a loop."""
class Scope(Stmt):
"""An artificial scope."""
fields = ("body",)
class OverlayScope(Stmt):
"""An overlay scope for extensions. This is a largely unoptimized scope
that however can be used to introduce completely arbitrary variables into
a sub scope from a dictionary or dictionary like object. The `context`
field has to evaluate to a dictionary object.
Example usage::
OverlayScope(context=self.call_method('get_context'),
body=[...])
.. versionadded:: 2.10
"""
fields = ("context", "body")
class EvalContextModifier(Stmt):
"""Modifies the eval context. For each option that should be modified,
a :class:`Keyword` has to be added to the :attr:`options` list.
Example to change the `autoescape` setting::
EvalContextModifier(options=[Keyword('autoescape', Const(True))])
"""
fields = ("options",)
class ScopedEvalContextModifier(EvalContextModifier):
"""Modifies the eval context and reverts it later. Works exactly like
:class:`EvalContextModifier` but will only modify the
:class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
"""
fields = ("body",)
# make sure nobody creates custom nodes
def _failing_new(*args, **kwargs):
raise TypeError("can't create custom node types")
NodeType.__new__ = staticmethod(_failing_new)
del _failing_new
| mit | -4,534,974,667,471,050,000 | 27.579963 | 88 | 0.598135 | false |
zstackio/zstack-woodpecker | integrationtest/vm/vpc_ha/suite_teardown.py | 2 | 1332 | '''
Integration Test Teardown case
@author: Youyk
'''
import zstacklib.utils.linux as linux
import zstacklib.utils.http as http
import zstackwoodpecker.setup_actions as setup_actions
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.clean_util as clean_util
import zstackwoodpecker.test_lib as test_lib
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
def test():
clean_util.cleanup_all_vms_violently()
clean_util.cleanup_none_vm_volumes_violently()
clean_util.umount_all_primary_storages_violently()
clean_util.cleanup_backup_storage()
#linux.remove_vlan_eth("eth0", 10)
#linux.remove_vlan_eth("eth0", 11)
cmd = host_plugin.DeleteVlanDeviceCmd()
cmd.vlan_ethname = 'eth0.10'
hosts = test_lib.lib_get_all_hosts_from_plan()
if type(hosts) != type([]):
hosts = [hosts]
for host in hosts:
http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.DELETE_VLAN_DEVICE_PATH), cmd)
cmd.vlan_ethname = 'eth0.11'
for host in hosts:
http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.DELETE_VLAN_DEVICE_PATH), cmd)
test_lib.setup_plan.stop_node()
test_lib.lib_cleanup_host_ip_dict()
test_util.test_pass('VPC Teardown Success')
| apache-2.0 | 1,106,873,221,679,088,500 | 33.153846 | 116 | 0.725225 | false |
cylc/cylc | cylc/flow/cycling/util.py | 1 | 1605 | # THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Cycling utility functions."""
from metomi.isodatetime.parsers import TimePointParser, DurationParser
def add_offset(cycle_point, offset):
"""Add a (positive or negative) offset to a cycle point.
Return the result.
"""
my_parser = TimePointParser()
my_target_point = my_parser.parse(cycle_point, dump_as_parsed=True)
my_offset_parser = DurationParser()
oper = "+"
if offset.startswith("-") or offset.startswith("+"):
oper = offset[0]
offset = offset[1:]
if offset.startswith("P"):
my_shift = my_offset_parser.parse(offset)
if oper == "-":
my_target_point -= my_shift
else:
my_target_point += my_shift
else:
# TODO - raise appropriate exception
raise ValueError("ERROR, bad offset format: %s" % offset)
return my_target_point
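# Illustrative usage (not part of the original module): offsets are signed
# ISO 8601 durations applied to the parsed cycle point, e.g.
#
#   add_offset('20100101T00Z', 'PT6H')    # -> 2010-01-01 06:00 UTC
#   add_offset('20100101T00Z', '-P1D')    # -> 2009-12-31 00:00 UTC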
| gpl-3.0 | -738,610,313,434,172,500 | 35.477273 | 71 | 0.684735 | false |
AlgoLab/pygfa | test/test_dovetails_all_simple_paths.py | 1 | 6256 | import sys
sys.path.insert(0, '../')
import copy
import unittest
import pygfa
gfa_file = "H\tVN:Z:1.0\n" + "S\t1\t*\tLN:i:6871\tRC:i:2200067\n" \
+ "S\t10\t*\tLN:i:251\tRC:i:82006\n" + "S\t11\t*\tLN:i:208\tRC:i:39533\n" \
+ "S\t12\t*\tLN:i:186\tRC:i:34457\n" + "S\t16\t*\tLN:i:157\tRC:i:15334\n" \
+ "S\t18\t*\tLN:i:145\tRC:i:55632\n" + "S\t19\t*\tLN:i:134\tRC:i:49274\n" \
+ "S\t2\t*\tLN:i:4589\tRC:i:6428225\n" + "S\t20\t*\tLN:i:134\tRC:i:20521\n" \
+ "S\t21\t*\tLN:i:133\tRC:i:28174\n" + "S\t22\t*\tLN:i:132\tRC:i:17846\n" \
+ "S\t23\t*\tLN:i:132\tRC:i:24658\n" + "S\t24\t*\tLN:i:107\tRC:i:22256\n" \
+ "S\t3\t*\tLN:i:2044\tRC:i:2727166\n" + "S\t4\t*\tLN:i:1744\tRC:i:1729157\n" \
+ "S\t5\t*\tLN:i:1378\tRC:i:1071246\n" + "S\t6\t*\tLN:i:1356\tRC:i:422793\n" \
+ "S\t7\t*\tLN:i:920\tRC:i:630822\n" + "S\t8\t*\tLN:i:876\tRC:i:794734\n" \
+ "S\t9\t*\tLN:i:255\tRC:i:40589\n" + "S\t25\t*\tLN:i:1000\n" \
+ "S\t26\t*\tLN:i:1000\n" + "S\t27\t*\tLN:i:1000\n" \
+ "S\t28\t*\tLN:i:1000\n" + "L\t25\t+\t26\t+\t10M\n" \
+ "L\t26\t+\t27\t+\t7M\n" + "L\t27\t+\t28\t+\t10M\n" \
+ "L\t28\t+\t25\t+\t5M\n" + "S\t13\t*\n" \
+ "S\t41\t*\n" + "C\t1\t+\t5\t+\t12\t120M\tID:Z:1_to_5\n" \
+ "P\t15\t41+,13+\t120M\n" + "L\t11\t+\t13\t+\t120M\tID:Z:11_to_13\n" \
+ "L\t1\t+\t2\t+\t10M\n" + "L\t1\t-\t19\t-\t10M\n" \
+ "L\t10\t+\t3\t-\t10M\n" + "L\t10\t-\t4\t+\t10M\n" \
+ "L\t11\t-\t6\t-\t10M\n" + "L\t11\t+\t9\t-\t10M\n" \
+ "L\t12\t+\t9\t+\t10M\n" + "L\t12\t-\t18\t+\t10M\n" \
+ "L\t16\t+\t20\t+\t10M\n" + "L\t16\t-\t22\t-\t10M\n" \
+ "L\t18\t+\t19\t+\t10M\n" + "L\t18\t-\t23\t+\t10M\n" \
+ "L\t2\t+\t5\t+\t10M\n" + "L\t2\t+\t5\t-\t10M\n" \
+ "L\t2\t-\t8\t+\t10M\n" + "L\t20\t+\t21\t+\t10M\n" \
+ "L\t21\t+\t23\t-\t10M\n" + "L\t22\t-\t6\t-\t10M\n" \
+ "L\t24\t+\t7\t+\t10M\n" + "L\t24\t-\t7\t+\t10M\n" \
+ "L\t3\t+\t4\t-\t10M\n" + "L\t3\t-\t6\t+\t10M\n" \
+ "L\t3\t-\t8\t-\t10M\n" + "L\t4\t-\t7\t-\t10M\n"
#
#
# [8_8_.... ... ... ... ..8_8]
# [5 / [13] \ [10_10] [24
# 26 5 \ / \ \ / \ /24
# / \ 5 [2_2_2] [12] --- [9] ------ [11] [3_3_3] / / 24
# 25 27 5 / \ / \ / \[4_4_4] --- [7] 24
# \ / 5] [1_1_1] --- [19] --- [18] [6_6_6_6] \ 24
# 28 \[23]-[21]-[20]-[16]-[22]/ \24]
#
# [41_41_41]
#
class TestLine (unittest.TestCase):
graph = pygfa.gfa.GFA()
graph.from_string(gfa_file)
def test_dovetails_all_simple_paths(self):
self.assertTrue([("18", "12"),
("12", "9"),
("9", "11"),
("11", "6")]
in list(pygfa.dovetails_all_simple_paths(\
self.graph,
"18",
"6",
edges=True)))
self.assertTrue([("18", "23"),
("23", "21"),
("21", "20"),
("20", "16"),
("16", "22"),
("22", "6")]
in list(pygfa.dovetails_all_simple_paths(\
self.graph,
"18",
"6",
edges=True)))
self.assertTrue([("18", "19"),
("19", "1"),
("1", "2"),
("2", "8"),
("8", "3"),
("3", "6")]
in list(pygfa.dovetails_all_simple_paths(\
self.graph,
"18",
"6",
edges=True)))
self.assertTrue(len(list(pygfa.dovetails_all_simple_paths(\
self.graph,
"18",
"6",
edges=True))) == 3)
self.assertTrue(["18", "12", "9", "11", "6"]
in list(pygfa.dovetails_all_simple_paths(\
self.graph,
"18",
"6",
edges=False)))
self.assertTrue(["18", "23", "21", "20", "16", "22", "6"]
in list(pygfa.dovetails_all_simple_paths(\
self.graph,
"18",
"6",
edges=False)))
self.assertTrue(["18", "19", "1", "2", "8", "3", "6"]
in list(pygfa.dovetails_all_simple_paths(\
self.graph,
"18",
"6",
edges=False)))
self.assertTrue(len(list(pygfa.dovetails_all_simple_paths(\
self.graph,
"18",
"6",
edges=True))) == 3)
if __name__ == '__main__':
unittest.main()
| mit | 5,264,646,996,850,292,000 | 48.650794 | 114 | 0.297315 | false |
paradoxxxzero/pyjade | pyjade/lexer.py | 1 | 23622 | from __future__ import absolute_import
import re
from collections import deque
import six
class Token:
def __init__(self, **kwds):
self.buffer = None
self.__dict__.update(kwds)
def __str__(self):
return self.__dict__.__str__()
def regexec(regex, input):
matches = regex.match(input)
if matches:
return (input[matches.start():matches.end()],) + matches.groups()
return None
def detect_closing_bracket(string):
count = 0
pos = string.find('[')
while True:
if string[pos] == '[':
count += 1
if string[pos] == ']':
count -= 1
pos += 1
if count == 0:
return pos
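# Illustrative example (not part of the original source): the returned index
# is one past the bracket that balances the first '[':
#
#   >>> detect_closing_bracket('ab[c[d]e]f')
#   9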
def replace_string_brackets(splitted_string):
sval_replaced = []
old_delim = None
for i in splitted_string:
if old_delim is None:
sval_replaced.append(i)
if i in ('"', "'"):
old_delim = i
continue
if i in ('"', "'"):
if i == old_delim:
old_delim = None
sval_replaced.append(i)
continue
sval_replaced.append(re.sub(r'\[|\]', '*', i))
return ''.join(sval_replaced)
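# Illustrative example (not part of the original source): brackets inside
# quoted segments are masked with '*' so that inline-tag detection only sees
# structural brackets (STRING_SPLITS is defined on Lexer below):
#
#   >>> replace_string_brackets(Lexer.STRING_SPLITS.split('a["x[1]"]'))
#   'a["x*1*"]'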
class Lexer(object):
RE_INPUT = re.compile(r'\r\n|\r')
RE_COMMENT = re.compile(r'^ *\/\/(-)?([^\n]*)')
RE_TAG = re.compile(r'^(\w[-:\w]*)')
RE_DOT_BLOCK_START = re.compile(r'^\.\n')
RE_FILTER = re.compile(r'^:(\w+)')
RE_DOCTYPE = re.compile(r'^(?:!!!|doctype) *([^\n]+)?')
RE_ID = re.compile(r'^#([\w-]+)')
RE_CLASS = re.compile(r'^\.([\w-]+)')
RE_STRING = re.compile(r'^(?:\| ?)([^\n]+)')
RE_TEXT = re.compile(r'^([^\n]+)')
RE_EXTENDS = re.compile(r'^extends? +([^\n]+)')
RE_PREPEND = re.compile(r'^prepend +([^\n]+)')
RE_APPEND = re.compile(r'^append +([^\n]+)')
RE_BLOCK = re.compile(r'''^block(( +(?:(prepend|append) +)?([^\n]*))|\n)''')
RE_YIELD = re.compile(r'^yield *')
RE_INCLUDE = re.compile(r'^include +([^\n]+)')
RE_ASSIGNMENT = re.compile(r'^(-[^\n\w]+var[^\n\w]+)?(\w+) += *([^;\n]+)( *;? *)')
RE_MIXIN = re.compile(r'^mixin +([-\w]+)(?: *\((.*)\))?')
RE_CALL = re.compile(r'^\+\s*([-.\w]+)(?: *\((.*)\))?')
RE_CONDITIONAL = re.compile(r'^(?:- *)?(if|unless|else if|elif|else)\b([^\n]*)')
RE_BLANK = re.compile(r'^\n *\n')
# RE_WHILE = re.compile(r'^while +([^\n]+)')
RE_EACH = re.compile(r'^(?:- *)?(?:each|for) +([\w, ]+) +in +([^\n]+)')
RE_BUFFERED_CODE = re.compile(r'^(!?=)([^\n]*)')
RE_UNBUFFERED_CODE = re.compile(r'^- *([^\n]*)')
RE_ATTR_INTERPOLATE = re.compile(r'#\{([^}]+)\}')
RE_ATTR_PARSE = re.compile(r'''^['"]|['"]$''')
RE_INDENT_TABS = re.compile(r'^\n(\t*) *')
RE_INDENT_SPACES = re.compile(r'^\n( *)')
RE_COLON = re.compile(r'^: *')
RE_INLINE = re.compile(r'(?<!\\)#\[')
RE_INLINE_ESCAPE = re.compile(r'\\#\[')
STRING_SPLITS = re.compile(r'([\'"])(.*?)(?<!\\)(\1)')
def __init__(self, string, **options):
if isinstance(string, six.binary_type):
string = six.text_type(string, 'utf8')
self.options = options
self.input = self.RE_INPUT.sub('\n', string)
self.colons = self.options.get('colons', False)
self.deferredTokens = deque()
self.lastIndents = 0
self.lineno = 1
self.stash = deque()
self.indentStack = deque()
self.indentRe = None
self.pipeless = False
self.isTextBlock = False
def tok(self, type, val=None):
return Token(type=type, line=self.lineno, val=val, inline_level=self.options.get('inline_level', 0))
def consume(self, len):
self.input = self.input[len:]
def scan(self, regexp, type):
captures = regexec(regexp, self.input)
# print regexp,type, self.input, captures
if captures:
# print captures
self.consume(len(captures[0]))
# print 'a',self.input
if len(captures) == 1:
return self.tok(type, None)
return self.tok(type, captures[1])
def defer(self, tok):
self.deferredTokens.append(tok)
def lookahead(self, n):
# print self.stash
fetch = n - len(self.stash)
while True:
fetch -= 1
if not fetch >= 0:
break
self.stash.append(self.next())
return self.stash[n - 1]
def indexOfDelimiters(self, start, end):
str, nstart, nend, pos = self.input, 0, 0, 0
for i, s in enumerate(str):
if start == s:
nstart += 1
elif end == s:
nend += 1
if nend == nstart:
pos = i
break
return pos
def stashed(self):
# print self.stash
return len(self.stash) and self.stash.popleft()
def deferred(self):
return len(self.deferredTokens) and self.deferredTokens.popleft()
def eos(self):
# print 'eos',bool(self.input)
if self.input:
return
if self.indentStack:
self.indentStack.popleft()
return self.tok('outdent')
else:
return self.tok('eos')
def consumeBlank(self):
captures = regexec(self.RE_BLANK, self.input)
if not captures:
return
self.lineno += 1
self.consume(len(captures[0]) - 1)
return captures
def blank(self):
if self.pipeless:
return
if self.consumeBlank():
return self.next()
def comment(self):
captures = regexec(self.RE_COMMENT, self.input)
if captures:
self.consume(len(captures[0]))
tok = self.tok('comment', captures[2])
tok.buffer = '-' != captures[1]
return tok
def tag(self):
captures = regexec(self.RE_TAG, self.input)
# print self.input,captures,re.match('^(\w[-:\w]*)',self.input)
if captures:
self.consume(len(captures[0]))
name = captures[1]
if name.endswith(':'):
name = name[:-1]
tok = self.tok('tag', name)
self.defer(self.tok(':'))
while self.input[0] == ' ':
self.input = self.input[1:]
else:
tok = self.tok('tag', name)
return tok
def textBlockStart(self):
captures = regexec(self.RE_DOT_BLOCK_START, self.input)
if captures is None:
return
if len(self.indentStack) > 0:
self.textBlockTagIndent = self.indentStack[0]
else:
self.textBlockTagIndent = 0
self.consume(1)
self.isTextBlock = True
return self.textBlockContinue(isStart=True)
def textBlockContinue(self, isStart=False):
if not self.isTextBlock:
return
tokens = deque()
while True:
if self.consumeBlank():
if not isStart:
tokens.append(self.tok('string', ''))
continue
eos = self.eos()
if eos is not None:
if isStart:
return eos
tokens.append(eos)
break
nextIndent = self.captureIndent()
if nextIndent is None or len(nextIndent[1]) <= self.textBlockTagIndent:
self.isTextBlock = False
if isStart:
return self.tok('newline')
break
padding = 0
if not isStart and len(nextIndent[1]) > self.textBlockIndent:
padding = len(nextIndent[1]) - self.textBlockIndent
self.consume(1 + padding)
self.input = '\n' + self.input
indent = self.indent()
if isStart:
self.textBlockIndent = indent.val
padding = 0
itoks = self.scanInline(self.RE_TEXT, 'string')
indentChar = self.indentRe == self.RE_INDENT_TABS and '\t' or ' '
if itoks:
itoks[0].val = (indentChar * padding) + itoks[0].val
if isStart:
for tok in itoks or []:
self.defer(tok)
return indent
tokens.extend(itoks)
if not tokens:
firstTok = None
else:
firstTok = tokens.popleft()
while tokens:
if tokens[-1].type == 'string' and not tokens[-1].val:
tokens.pop()
continue
self.defer(tokens.popleft())
self.isTextBlock = False
return firstTok
def filter(self):
return self.scan(self.RE_FILTER, 'filter')
def doctype(self):
# print self.scan(self.RE_DOCTYPE, 'doctype')
return self.scan(self.RE_DOCTYPE, 'doctype')
def id(self):
return self.scan(self.RE_ID, 'id')
def className(self):
return self.scan(self.RE_CLASS, 'class')
def processInline(self, val):
sval = self.STRING_SPLITS.split(val)
sval_stripped = [i.strip() for i in sval]
if sval_stripped.count('"') % 2 != 0 or sval_stripped.count("'") % 2 != 0:
raise Exception('Unbalanced quotes found inside inline jade at line %s.' % self.lineno)
sval_replaced = replace_string_brackets(sval)
start_inline = self.RE_INLINE.search(sval_replaced).start()
try:
closing = start_inline + detect_closing_bracket(sval_replaced[start_inline:])
except IndexError:
raise Exception('The end of the string was reached with no closing bracket found at line %s.' % self.lineno)
textl = val[:start_inline]
code = val[start_inline:closing][2:-1]
textr = val[closing:]
toks = deque()
toks.append(self.tok('string', self.RE_INLINE_ESCAPE.sub('#[', textl)))
ilexer = InlineLexer(code, inline_level=self.options.get('inline_level', 0) + 1)
while True:
tok = ilexer.advance()
if tok.type == 'eos':
break
toks.append(tok)
if self.RE_INLINE.search(textr):
toks.extend(self.processInline(textr))
else:
toks.append(self.tok('string', self.RE_INLINE_ESCAPE.sub('#[', textr)))
return toks
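    # Illustrative note (not in the original source): for input like
    #   'before #[em strong] after'
    # processInline yields a 'string' token for 'before ', then the tokens
    # produced by lexing 'em strong' with an InlineLexer at inline_level + 1,
    # and finally a 'string' token for ' after'.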
def scanInline(self, regexp, type):
ret = self.scan(regexp, type)
if ret is None:
return ret
if self.RE_INLINE.search(ret.val):
ret = self.processInline(ret.val)
if ret:
ret[0].val = ret[0].val.lstrip()
else:
ret.val = self.RE_INLINE_ESCAPE.sub('#[', ret.val)
ret = deque([ret])
return ret
def scanInlineProcess(self, regexp, type_):
toks = self.scanInline(regexp, type_)
if not toks:
return None
firstTok = toks.popleft()
for tok in toks:
self.defer(tok)
return firstTok
def string(self):
return self.scanInlineProcess(self.RE_STRING, 'string')
def text(self):
return self.scanInlineProcess(self.RE_TEXT, 'text')
def extends(self):
return self.scan(self.RE_EXTENDS, 'extends')
def prepend(self):
captures = regexec(self.RE_PREPEND, self.input)
if captures:
self.consume(len(captures[0]))
mode, name = 'prepend', captures[1]
tok = self.tok('block', name)
tok.mode = mode
return tok
def append(self):
captures = regexec(self.RE_APPEND, self.input)
if captures:
self.consume(len(captures[0]))
mode, name = 'append', captures[1]
tok = self.tok('block', name)
tok.mode = mode
return tok
def block(self):
captures = regexec(self.RE_BLOCK, self.input)
if captures:
self.consume(len(captures[0]))
mode = captures[3] or 'replace'
name = captures[4] or ''
tok = self.tok('block', name)
tok.mode = mode
return tok
def _yield(self):
return self.scan(self.RE_YIELD, 'yield')
def include(self):
return self.scan(self.RE_INCLUDE, 'include')
def assignment(self):
captures = regexec(self.RE_ASSIGNMENT, self.input)
if captures:
self.consume(len(captures[0]))
name, val = captures[2:4]
tok = self.tok('assignment')
tok.name = name
tok.val = val
return tok
def mixin(self):
captures = regexec(self.RE_MIXIN, self.input)
if captures:
self.consume(len(captures[0]))
tok = self.tok('mixin', captures[1])
tok.args = captures[2]
return tok
def call(self):
captures = regexec(self.RE_CALL, self.input)
if captures:
self.consume(len(captures[0]))
tok = self.tok('call', captures[1])
tok.args = captures[2]
return tok
def conditional(self):
captures = regexec(self.RE_CONDITIONAL, self.input)
if captures:
self.consume(len(captures[0]))
type, sentence = captures[1:]
tok = self.tok('conditional', type)
tok.sentence = sentence
return tok
# def _while(self):
# captures = regexec(self.RE_WHILE,self.input)
# if captures:
# self.consume(len(captures[0]))
# return self.tok('code','while(%s)'%captures[1])
def each(self):
captures = regexec(self.RE_EACH, self.input)
if captures:
self.consume(len(captures[0]))
tok = self.tok('each', None)
tok.keys = [x.strip() for x in captures[1].split(',')]
tok.code = captures[2]
return tok
def buffered_code(self):
captures = regexec(self.RE_BUFFERED_CODE, self.input)
if captures:
self.consume(len(captures[0]))
flags, name = captures[1:]
tok = self.tok('code', name)
tok.escape = flags.startswith('=')
#print captures
tok.buffer = True
# print tok.buffer
return tok
def unbuffered_code(self):
captures = regexec(self.RE_UNBUFFERED_CODE, self.input)
if captures:
self.consume(len(captures[0]))
tok = self.tok('code', captures[1])
#print captures
tok.escape = False
tok.buffer = False
# print tok.buffer
return tok
def attrs(self):
if '(' == self.input[0]:
index = self.indexOfDelimiters('(', ')')
string = self.input[1:index]
tok = self.tok('attrs')
l = len(string)
colons = self.colons
states = ['key']
class Namespace:
key = u''
val = u''
quote = u''
literal = True
def reset(self):
self.key = self.val = self.quote = u''
self.literal = True
def __str__(self):
return dict(key=self.key, val=self.val, quote=self.quote,
literal=self.literal).__str__()
ns = Namespace()
def state():
return states[-1]
def interpolate(attr):
attr, num = self.RE_ATTR_INTERPOLATE.subn(lambda matchobj: '%s+"{}".format(%s)+%s' % (ns.quote, matchobj.group(1), ns.quote), attr)
return attr, (num > 0)
self.consume(index + 1)
from .utils import odict
tok.attrs = odict()
tok.static_attrs = set()
str_nums = list(map(str, range(10)))
# print '------'
def parse(c):
real = c
if colons and ':' == c:
c = '='
ns.literal = ns.literal and (state() not in ('object', 'array',
'expr'))
# print ns, c, states
if c in (',', '\n') or (c == ' ' and state() == 'val' and len(states) == 2 and ns.val.strip()):
s = state()
if s in ('expr', 'array', 'string', 'object'):
ns.val += c
else:
states.append('key')
ns.val = ns.val.strip()
ns.key = ns.key.strip()
if not ns.key:
return
# ns.literal = ns.quote
if not ns.literal:
if '!' == ns.key[-1]:
ns.literal = True
ns.key = ns.key[:-1]
ns.key = ns.key.strip("'\"")
if not ns.val:
tok.attrs[ns.key] = True
else:
val, is_interpolated = interpolate(ns.val)
if ns.key == 'class' and 'class' in tok.attrs:
tok.attrs['class'] = '[%s, %s]' % (
tok.attrs['class'],
val
)
else:
tok.attrs[ns.key] = val
ns.literal = ns.literal and not is_interpolated
if ns.literal:
tok.static_attrs.add(ns.key)
ns.reset()
elif '=' == c:
s = state()
if s == 'key char':
ns.key += real
elif s in ('val', 'expr', 'array', 'string', 'object'):
ns.val += real
else:
states.append('val')
elif '(' == c:
if state() in ('val', 'expr'):
states.append('expr')
ns.val += c
elif ')' == c:
if state() in ('val', 'expr'):
states.pop()
ns.val += c
elif '{' == c:
if 'val' == state():
states.append('object')
ns.val += c
elif '}' == c:
if 'object' == state():
states.pop()
ns.val += c
elif '[' == c:
if 'val' == state():
states.append('array')
ns.val += c
elif ']' == c:
if 'array' == state():
states.pop()
ns.val += c
elif c in ('"', "'"):
s = state()
if 'key' == s:
states.append('key char')
elif 'key char' == s:
states.pop()
elif 'string' == s:
if c == ns.quote:
states.pop()
ns.val += c
else:
states.append('string')
ns.val += c
ns.quote = c
elif '' == c:
pass
else:
s = state()
ns.literal = ns.literal and (s in ('key', 'string') or c in str_nums)
# print c, s, ns.literal
if s in ('key', 'key char'):
ns.key += c
else:
ns.val += c
for char in string:
parse(char)
parse(',')
return tok
def captureIndent(self):
if self.indentRe:
captures = regexec(self.indentRe, self.input)
else:
regex = self.RE_INDENT_TABS
captures = regexec(regex, self.input)
if captures and not captures[1]:
regex = self.RE_INDENT_SPACES
captures = regexec(regex, self.input)
if captures and captures[1]:
self.indentRe = regex
return captures
def indent(self):
captures = self.captureIndent()
if captures:
indents = len(captures[1])
self.lineno += 1
self.consume(indents + 1)
if not self.input:
return self.tok('newline')
if self.input[0] in (' ', '\t'):
raise Exception('Invalid indentation, you can use tabs or spaces but not both')
if '\n' == self.input[0]:
return self.tok('newline')
if self.indentStack and indents < self.indentStack[0]:
while self.indentStack and self.indentStack[0] > indents:
self.stash.append(self.tok('outdent'))
self.indentStack.popleft()
tok = self.stash.pop()
elif indents and (not self.indentStack or indents != self.indentStack[0]):
self.indentStack.appendleft(indents)
tok = self.tok('indent', indents)
else:
tok = self.tok('newline')
return tok
def pipelessText(self):
if self.pipeless:
if '\n' == self.input[0]:
return
i = self.input.find('\n')
if -1 == i:
i = len(self.input)
str = self.input[:i]
self.consume(len(str))
return self.tok('text', str)
def colon(self):
return self.scan(self.RE_COLON, ':')
def advance(self):
return self.stashed() or self.next()
def next(self):
return self.deferred() \
or self.textBlockContinue() \
or self.blank() \
or self.eos() \
or self.pipelessText() \
or self._yield() \
or self.doctype() \
or self.extends() \
or self.append() \
or self.prepend() \
or self.block() \
or self.include() \
or self.mixin() \
or self.call() \
or self.conditional() \
or self.each() \
or self.assignment() \
or self.tag() \
or self.textBlockStart() \
or self.filter() \
or self.unbuffered_code() \
or self.buffered_code() \
or self.id() \
or self.className() \
or self.attrs() \
or self.indent() \
or self.comment() \
or self.colon() \
or self.string() \
or self.text()
##or self._while() \
class InlineLexer(Lexer):
def next(self):
return self.deferred() \
or self.blank() \
or self.eos() \
or self.pipelessText() \
or self.mixin() \
or self.call() \
or self.assignment() \
or self.tag() \
or self.unbuffered_code() \
or self.buffered_code() \
or self.id() \
or self.className() \
or self.attrs() \
or self.colon() \
or self.string() \
or self.text()
| mit | 8,763,327,623,169,851,000 | 31.899721 | 147 | 0.461731 | false |
Adai0808/pybrain | pybrain/optimization/finitedifference/spsa.py | 25 | 2286 | __author__ = 'Frank Sehnke, [email protected], Tom Schaul'
from scipy import random
from .fd import FiniteDifferences
class SimpleSPSA(FiniteDifferences):
""" Simultaneous Perturbation Stochastic Approximation.
This class uses SPSA in general, but uses the likelihood gradient and a simpler exploration decay.
"""
epsilon = 2. #Initial value of exploration size
gamma = 0.9995 #Exploration decay factor
batchSize = 2
def _additionalInit(self):
        self.baseline = None # moving-average baseline; None also marks the first learning step
def perturbation(self):
        # generate a random +/-epsilon perturbation vector (uniformly random signs, as in SPSA)
deltas = (random.randint(0, 2, self.numParameters) * 2 - 1) * self.epsilon
        # reduce epsilon by the factor gamma: as a further simplification the
        # exploration magnitude simply decays geometrically, which is similar
        # to, but simpler than, the decreasing exploration schedule of SPSA.
self.epsilon *= self.gamma
return deltas
def _learnStep(self):
""" calculates the gradient and executes a step in the direction
of the gradient, scaled with a learning rate alpha. """
deltas = self.perturbation()
        # rewards of the positive and the negative perturbation
reward1 = self._oneEvaluation(self.current + deltas)
reward2 = self._oneEvaluation(self.current - deltas)
self.mreward = (reward1 + reward2) / 2.
if self.baseline is None:
# first learning step
self.baseline = self.mreward * 0.99
fakt = 0.
else:
#calc the gradients
if reward1 != reward2:
                #gradient estimate a la SPSA, but with likelihood gradient and normalization (see also "update parameters" below)
fakt = (reward1 - reward2) / (2.0 * self.bestEvaluation - reward1 - reward2)
else:
fakt = 0.0
self.baseline = 0.9 * self.baseline + 0.1 * self.mreward #update baseline
# update parameters
        # as a simplification we decay the stepsize as alpha = alpha * epsilon**2,
        # instead of the usual schedule from SPSA, resulting in the same update rule as for PGPE
self.current = self.gd(fakt * self.epsilon * self.epsilon / deltas)
| bsd-3-clause | 9,036,491,802,210,398,000 | 39.105263 | 131 | 0.650044 | false |
corpusmusic/billboardcorpus | parse.py | 1 | 5825 | from __future__ import division
import os
import re
import csv
from collections import defaultdict, deque
ROOT_DIR = 'McGill-Billboard'
KEYS = [{'A'}, {'A#', 'Bb'}, {'B', 'Cb'}, {'C'}, {'C#', 'Db'}, {'D'}, {'D#', 'Eb'}, {'E', 'Fb'}, {'F'}, {'F#', 'Gb'}, {'G'}, {'G#', 'Ab'}]
RN = ['I', 'bII', 'II', 'bIII', 'III', 'IV', 'bV', 'V', 'bVI', 'VI', 'bVII', 'VII']
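# KEYS lists the twelve pitch classes in ascending semitone order (enharmonic
# spellings grouped together); RN names each chromatic scale degree, so a
# root's semitone offset from the tonic (mod 12) indexes directly into RN.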
def lookup_chord(key, key_list):
"""Look up the numerical position of a chord root relative to an ordered list of roots (possibly shifted)."""
for i, k in enumerate(key_list):
if key in k:
return i
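# e.g. lookup_chord('Bb', KEYS) -> 1 and lookup_chord('F#', KEYS) -> 9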
def corpus_list(root):
"""Return a list of all paths to the salami text files, relative to the root directory of the project."""
    return [os.path.join(root, sub_dir, 'salami_chords.txt') for sub_dir in os.listdir(root)]
def get_tonic(f):
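    """Return the tonic of a song, taken from the first line containing
    'tonic' in its salami_chords file (e.g. '# tonic: C' -> 'C')."""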
fs = open(f)
line = fs.readline()
while 'tonic' not in line:
line = fs.readline()
tonic = line[line.index(':') + 2:-1]
return tonic
def get_chord_sequence(line):
"""
    Return the list of chord tokens found in a line, including the
    non-harmonic markers Z, N and &pause.
"""
chords = re.findall(r'\S+:\S+|\s+Z{1}\s|&pause|\s+N+\s', line)
return [c.strip() for c in chords]
def get_relative(tonic, chords):
"""
    Return a list of Roman-numeral roots ('I', 'bII', ..., 'VII') relative to
    the given tonic, for a list of absolute chord labels; the non-harmonic
    tokens N, Z and &pause are mapped to 'NonHarmonic'.
    TODO: needs testing and verification that it is working properly
"""
root_num = lookup_chord(tonic, KEYS)
shifted_keys = deque(KEYS)
shifted_keys.rotate(-root_num)
relative_chords = []
for c in chords:
if c.strip() == "N" or c.strip() == "Z" or c.strip() == "&pause":
relative_chords.append("NonHarmonic")
else:
root, quality = c.split(':')
relative_chords.append(RN[lookup_chord(root, shifted_keys)])
return relative_chords
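# e.g. get_relative('C', ['G:maj', 'F:maj', 'N']) -> ['V', 'IV', 'NonHarmonic']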
def update_form(previous_form, line):
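    """Extract a [section letter, section function] pair such as ['A', 'verse']
    from a line, falling back to previous_form when no new annotation is found."""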
if "title" in line or "artist" in line:
return previous_form
regex = re.compile("[a-z]+,|[A-Z]+'+,|[A-Z]+,|[a-z]+-+[a-z]+,|[a-z]+\s{1}[a-z]+,")
newform = regex.findall(line)
if newform:
if len(newform) == 1:
newform.insert(0,"")
#super temp fix
if newform[1] == "A," or newform[1] == "B," or newform[1] == "C,":
return [ newform[1].replace(",",""),previous_form[1] ]
#this is really terrible practice, but this error happens with less than .5% of chords.
if newform[1] == "voice," or newform[1] == "brass," or newform[1] == "synth,":
return previous_form
return [newform[0].replace(",",""), newform[1].replace(",","")]
else:
return previous_form
def get_chord_quality(chordList):
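    """Return the quality part of each chord token (e.g. 'maj' from 'C:maj');
    the non-harmonic tokens N, Z and &pause map to 'NonHarmonic'."""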
qualityList = []
for c in chordList:
if c.strip() == "N" or c.strip() == "Z" or c.strip() == "&pause":
qualityList.append("NonHarmonic")
else:
qualityList.append(c.split(":")[1])
return qualityList
def get_bar_in_phrase(line):
barNumbersList = []
barlist = line.split("|")[1:-1]
for index, bar in enumerate(barlist):
chordsInBar = get_chord_sequence(bar)
barNumbersList += [index + 1]*len(chordsInBar)
return barNumbersList
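# e.g. get_bar_in_phrase('| A:maj B:min | C:maj |') -> [1, 1, 2]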
def get_total_bars(barNumbersList):
    if barNumbersList:
        return barNumbersList[-1]
def get_title(song):
fs = open(song)
line = fs.readline()
while 'title' not in line:
line = fs.readline()
title = line[line.index(':') + 2:-1]
return title
def get_arrow(line):
if "->" in line:
return 1
else:
return 0
def get_repeats(line):
repeats = re.findall(r'x+\d',line)
if repeats:
return int(repeats[0][-1])
else:
return 1
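# NB: the regex above captures only the first digit after the 'x', so a marker
# like 'x12' is read as a repeat count of 1.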
def update_key(line, tonic):
if "tonic:" in line:
tonic = line[line.index(':') + 2:-1]
return tonic
if __name__ == '__main__':
"""Write an example csv to play with for the analysis code."""
filenames = corpus_list(ROOT_DIR)
relative_chords = []
with open('example.csv', 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for song in filenames:
Tonic = get_tonic(song)
fs=open(song)
formFunc = []
arrowList = []
titleList = []
chordList = []
relativeChordList = []
barNumbers = []
totalBarNumbers = []
formFuncList = []
formLetterList = []
chordQualityList = []
for line in fs:
chordsInPhrase = get_chord_sequence(line)
relativeChords = get_relative(Tonic,chordsInPhrase)
title = [get_title(song).replace(" ","")]*len(chordsInPhrase)
if 'Z' not in line :
barInPhrase = get_bar_in_phrase(line)
else:
barInPhrase = [0]
if 'Z' not in line :
totalInPhrase = [get_total_bars(barInPhrase)]*len(chordsInPhrase)
else:
totalInPhrase = [0]
phraseFormFunc = []
formLetter = []
formFunc = update_form(formFunc,line)
if formFunc: phraseFormFunc = [formFunc[1]]*len(chordsInPhrase)
if "arth" in phraseFormFunc:
print song
if formFunc: formLetter = [formFunc[0]]*len(chordsInPhrase)
chordQualities = get_chord_quality(chordsInPhrase)
arrows = [0]*len(chordsInPhrase)
if arrows: arrows[-1] = get_arrow(line)
repeats = get_repeats(line)
Tonic = update_key(line,Tonic)
titleList += title*repeats
chordList += chordsInPhrase*repeats
relativeChordList += relativeChords*repeats
barNumbers += barInPhrase*repeats
totalBarNumbers += totalInPhrase*repeats
formFuncList += phraseFormFunc*repeats
formLetterList += formLetter*repeats
chordQualityList += chordQualities*repeats
arrowList += arrows*repeats
            rows = zip(titleList, formFuncList, formLetterList, chordList,
                       relativeChordList, chordQualityList, barNumbers,
                       totalBarNumbers, arrowList)
            for title, form, letter, chord, interval, quality, num, total, arrow in rows:
                writer.writerow([title, form, letter, chord, interval, quality, num, total, arrow])
writer.writerow([])
| gpl-3.0 | 871,573,351,823,196,700 | 28.568528 | 201 | 0.636223 | false |