repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 distinct values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 distinct values)
---|---|---|---|---|---
jaimeMF/youtube-dl | youtube_dl/extractor/ynet.py | 105 | 1741 | # coding: utf-8
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote_plus
class YnetIE(InfoExtractor):
    _VALID_URL = r'http://(?:.+?\.)?ynet\.co\.il/(?:.+?/)?0,7340,(?P<id>L(?:-[0-9]+)+),00\.html'
    _TESTS = [
        {
            'url': 'http://hot.ynet.co.il/home/0,7340,L-11659-99244,00.html',
            'info_dict': {
                'id': 'L-11659-99244',
                'ext': 'flv',
                'title': 'איש לא יודע מאיפה באנו',
                'thumbnail': 're:^https?://.*\.jpg',
            }
        }, {
            'url': 'http://hot.ynet.co.il/home/0,7340,L-8859-84418,00.html',
            'info_dict': {
                'id': 'L-8859-84418',
                'ext': 'flv',
                'title': "צפו: הנשיקה הלוהטת של תורגי' ויוליה פלוטקין",
                'thumbnail': 're:^https?://.*\.jpg',
            }
        }
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
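        # The og:video URL carries a URL-encoded 'config=' JSON payload; unquote it and parse the player configuration.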
        content = compat_urllib_parse_unquote_plus(self._og_search_video_url(webpage))
        config = json.loads(self._search_regex(r'config=({.+?})$', content, 'video config'))
        f4m_url = config['clip']['url']
        title = self._og_search_title(webpage)
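        # og:title is usually of the form: ynet - HOT -- "<actual title>"; strip the site prefix when it matches.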
        m = re.search(r'ynet - HOT -- (["\']+)(?P<title>.+?)\1', title)
        if m:
            title = m.group('title')

        return {
            'id': video_id,
            'title': title,
            'formats': self._extract_f4m_formats(f4m_url, video_id),
            'thumbnail': self._og_search_thumbnail(webpage),
        }
| unlicense |
geimer/easybuild-easyblocks | easybuild/easyblocks/i/ipp.py | 4 | 3046 | ##
# Copyright 2009-2013 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for installing the Intel Integrated Performance Primitives (IPP) library, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.intelbase import IntelBase, ACTIVATION_NAME_2012, LICENSE_FILE_NAME_2012
class EB_ipp(IntelBase):
"""
Support for installing Intel Integrated Performance Primitives library
"""
def install_step(self):
"""
Actual installation
- create silent cfg file
- execute command
"""
silent_cfg_names_map = None
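        # Versions before 8.0 ship the 2012-style silent.cfg, which uses different activation/license-file key names.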
        if LooseVersion(self.version) < LooseVersion('8.0'):
            silent_cfg_names_map = {
                'activation_name': ACTIVATION_NAME_2012,
                'license_file_name': LICENSE_FILE_NAME_2012,
            }
        super(EB_ipp, self).install_step(silent_cfg_names_map=silent_cfg_names_map)

    def sanity_check_step(self):
        """Custom sanity check paths for IPP."""
        if LooseVersion(self.version) < LooseVersion('8.0'):
            dirs = ["compiler/lib/intel64", "ipp/bin", "ipp/include",
                    "ipp/interfaces/data-compression", "ipp/tools/intel64"]
        else:
            dirs = ["composerxe/lib/intel64", "ipp/bin", "ipp/include",
                    "ipp/tools/intel64"]
        custom_paths = {
            'files': ["ipp/lib/intel64/libipp%s" % y
                      for x in ["ac", "cc", "ch", "core", "cv", "dc", "di",
                                "i", "j", "m", "r", "s", "sc", "vc", "vm"]
                      for y in ["%s.a" % x, "%s.so" % x]],
            'dirs': dirs
        }
        super(EB_ipp, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 |
iakovos-panourgias/fluidity | tests/mms_rans_p1p1_cv_keps/mms_rans_p1p1_cv_keps_tools.py | 30 | 13064 | from math import sin, cos, tanh, pi
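# Manufactured-solution fields (velocity, pressure, density, k, epsilon) and their forcing/source terms for the
# k-epsilon RANS MMS test; the expressions below are presumably generated with a symbolic algebra tool.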
def u(X):
return cos(X[0]) + 0.600*sin(X[1]) + 3.00
def v(X):
return X[1]*sin(X[0])
def p(X):
return cos(X[1]) + sin(X[0]) + sin(X[0]*X[1]/pi) - 1.00
def rho(X):
return -1.30*cos(2.10*X[1]) - 1.80*sin(1.70*X[0]) + 3.70*sin(1.30*X[0]*X[1]/pi) + 5.20
def ke(X):
return 0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900
def eps(X):
return 4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20
def forcing_u(X):
return 0.600*X[1]*cos(X[1])*sin(X[0]) + (X[1]*cos(X[0]) + 0.600*cos(X[1]))*(1.02*X[0]*cos(0.600*X[0]*X[1]/pi)/pi - 3.44*sin(0.800*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)**2 - 2*(X[1]*cos(X[0]) + 0.600*cos(X[1]))*(-0.320*X[0]*sin(0.800*X[0]*X[1]/pi)/pi + 0.420*cos(0.700*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20) - (0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2*(cos(X[0]) - 0.600*sin(X[1]))/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20) + 2*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2*cos(X[0])/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20) - (cos(X[0]) + 0.600*sin(X[1]) + 3.00)*sin(X[0]) - 2*(1.02*X[1]*cos(0.600*X[0]*X[1]/pi)/pi - 2.66*cos(0.700*X[0]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2*sin(X[0])/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)**2 + 4*(-0.320*X[1]*sin(0.800*X[0]*X[1]/pi)/pi - 0.540*sin(0.600*X[0]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)*sin(X[0])/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20) + X[1]*cos(X[0]*X[1]/pi)/pi + 2.00*cos(X[0]) + 0.919238815300000*cos(2.10*X[1]) + 1.27279220580000*sin(1.70*X[0]) - 2.61629508970000*sin(1.30*X[0]*X[1]/pi) + 0.600*sin(X[1]) - 3.67695526120000
def forcing_v(X):
return X[1]*(cos(X[0]) + 0.600*sin(X[1]) + 3.00)*cos(X[0]) + X[1]*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2*sin(X[0])/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20) + X[1]*sin(X[0])**2 + (X[1]*cos(X[0]) + 0.600*cos(X[1]))*(1.02*X[1]*cos(0.600*X[0]*X[1]/pi)/pi - 2.66*cos(0.700*X[0]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)**2 - 2*(X[1]*cos(X[0]) + 0.600*cos(X[1]))*(-0.320*X[1]*sin(0.800*X[0]*X[1]/pi)/pi - 0.540*sin(0.600*X[0]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20) + X[1]*sin(X[0]) + 2*(1.02*X[0]*cos(0.600*X[0]*X[1]/pi)/pi - 3.44*sin(0.800*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2*sin(X[0])/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)**2 - 4*(-0.320*X[0]*sin(0.800*X[0]*X[1]/pi)/pi + 0.420*cos(0.700*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)*sin(X[0])/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20) + X[0]*cos(X[0]*X[1]/pi)/pi + 0.919238815300000*cos(2.10*X[1]) + 1.27279220580000*sin(1.70*X[0]) - 2.61629508970000*sin(1.30*X[0]*X[1]/pi) - sin(X[1]) - 3.67695526120000
def forcing_rho(X):
return X[1]*(4.81*X[0]*cos(1.30*X[0]*X[1]/pi)/pi + 2.73*sin(2.10*X[1]))*sin(X[0]) + ((1.02*X[0]*cos(0.600*X[0]*X[1]/pi)/pi - 3.44*sin(0.800*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)**2 - 2*(-0.320*X[0]*sin(0.800*X[0]*X[1]/pi)/pi + 0.420*cos(0.700*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20))*(4.81*X[0]*cos(1.30*X[0]*X[1]/pi)/pi + 2.73*sin(2.10*X[1])) + ((1.02*X[1]*cos(0.600*X[0]*X[1]/pi)/pi - 2.66*cos(0.700*X[0]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)**2 - 2*(-0.320*X[1]*sin(0.800*X[0]*X[1]/pi)/pi - 0.540*sin(0.600*X[0]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20))*(4.81*X[1]*cos(1.30*X[0]*X[1]/pi)/pi - 3.06*cos(1.70*X[0])) - ((0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20) + 1.00)*(-6.25300000000000*X[0]**2*sin(1.30*X[0]*X[1]/pi)/pi**2 - 6.25300000000000*X[1]**2*sin(1.30*X[0]*X[1]/pi)/pi**2 + 5.73300000000000*cos(2.10*X[1]) + 5.20200000000000*sin(1.70*X[0])) + (4.81*X[1]*cos(1.30*X[0]*X[1]/pi)/pi - 3.06*cos(1.70*X[0]))*(cos(X[0]) + 0.600*sin(X[1]) + 3.00)
def forcing_ke(X):
return X[1]*(-0.320*X[0]*sin(0.800*X[0]*X[1]/pi)/pi + 0.420*cos(0.700*X[1]))*sin(X[0]) - ((0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20) + 1.00)*(-0.256*X[0]**2*cos(0.800*X[0]*X[1]/pi)/pi**2 - 0.256*X[1]**2*cos(0.800*X[0]*X[1]/pi)/pi**2 - 0.324*cos(0.600*X[0]) - 0.294*sin(0.700*X[1])) + ((1.02*X[0]*cos(0.600*X[0]*X[1]/pi)/pi - 3.44*sin(0.800*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)**2 - 2*(-0.320*X[0]*sin(0.800*X[0]*X[1]/pi)/pi + 0.420*cos(0.700*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20))*(-0.320*X[0]*sin(0.800*X[0]*X[1]/pi)/pi + 0.420*cos(0.700*X[1])) + ((1.02*X[1]*cos(0.600*X[0]*X[1]/pi)/pi - 2.66*cos(0.700*X[0]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)**2 - 2*(-0.320*X[1]*sin(0.800*X[0]*X[1]/pi)/pi - 0.540*sin(0.600*X[0]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20))*(-0.320*X[1]*sin(0.800*X[0]*X[1]/pi)/pi - 0.540*sin(0.600*X[0])) + (-0.320*X[1]*sin(0.800*X[0]*X[1]/pi)/pi - 0.540*sin(0.600*X[0]))*(cos(X[0]) + 0.600*sin(X[1]) + 3.00) - (X[1]**2*cos(X[0])**2 + 1.20*X[1]*cos(X[0])*cos(X[1]) + 0.360*cos(X[1])**2 + 4*sin(X[0])**2)*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20) + (3.40118361661000*X[0]*cos(1.30*X[0]*X[1]/pi)/pi + 3.40118361661000*X[1]*cos(1.30*X[0]*X[1]/pi)/pi - 2.16374674986000*cos(1.70*X[0]) + 1.93040151213000*sin(2.10*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20) + 4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20
def forcing_eps(X):
return X[1]*(1.02*X[0]*cos(0.600*X[0]*X[1]/pi)/pi - 3.44*sin(0.800*X[1]))*sin(X[0]) + (3.40118361661000*X[0]*cos(1.30*X[0]*X[1]/pi)/pi + 3.40118361661000*X[1]*cos(1.30*X[0]*X[1]/pi)/pi - 2.16374674986000*cos(1.70*X[0]) + 1.93040151213000*sin(2.10*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)*tanh((0.707106781000000*X[1]*sin(X[0]) + 0.707106781000000*cos(X[0]) + 0.424264068600000*sin(X[1]) + 2.12132034300000)/((X[1]*sin(X[0]))**2.00 - (0.707106781000000*X[1]*sin(X[0]) + 0.707106781000000*cos(X[0]) + 0.424264068600000*sin(X[1]) + 2.12132034300000)**2.00 + (cos(X[0]) + 0.600*sin(X[1]) + 3.00)**2.00)**0.500) + ((1.02*X[0]*cos(0.600*X[0]*X[1]/pi)/pi - 3.44*sin(0.800*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)**2 - 2*(-0.320*X[0]*sin(0.800*X[0]*X[1]/pi)/pi + 0.420*cos(0.700*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20))*(1.02*X[0]*cos(0.600*X[0]*X[1]/pi)/pi - 3.44*sin(0.800*X[1])) + ((1.02*X[1]*cos(0.600*X[0]*X[1]/pi)/pi - 2.66*cos(0.700*X[0]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)**2 - 2*(-0.320*X[1]*sin(0.800*X[0]*X[1]/pi)/pi - 0.540*sin(0.600*X[0]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20))*(1.02*X[1]*cos(0.600*X[0]*X[1]/pi)/pi - 2.66*cos(0.700*X[0])) - ((0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20) + 1.00)*(-0.612*X[0]**2*sin(0.600*X[0]*X[1]/pi)/pi**2 - 0.612*X[1]**2*sin(0.600*X[0]*X[1]/pi)/pi**2 - 2.75200000000000*cos(0.800*X[1]) + 1.86200000000000*sin(0.700*X[0])) - (X[1]**2*cos(X[0])**2 + 1.20*X[1]*cos(X[0])*cos(X[1]) + 0.360*cos(X[1])**2 + 4*sin(X[0])**2)*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900) + (1.02*X[1]*cos(0.600*X[0]*X[1]/pi)/pi - 2.66*cos(0.700*X[0]))*(cos(X[0]) + 0.600*sin(X[1]) + 3.00) + (4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)**2/(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)
def P_ke(X):
return (X[1]**2*cos(X[0])**2 + 1.20*X[1]*cos(X[0])*cos(X[1]) + 0.360*cos(X[1])**2 + 4*sin(X[0])**2)*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)
def P_eps(X):
return (X[1]**2*cos(X[0])**2 + 1.20*X[1]*cos(X[0])*cos(X[1]) + 0.360*cos(X[1])**2 + 4*sin(X[0])**2)*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)
def A_ke(X):
return -4.30*cos(0.800*X[1]) + 3.80*sin(0.700*X[0]) - 1.70*sin(0.600*X[0]*X[1]/pi) - 8.20
def A_eps(X):
return -(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)**2.00/(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)
def B_ke(X):
return -(3.40118361661000*X[0]*cos(1.30*X[0]*X[1]/pi)/pi + 3.40118361661000*X[1]*cos(1.30*X[0]*X[1]/pi)/pi - 2.16374674986000*cos(1.70*X[0]) + 1.93040151213000*sin(2.10*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)
def B_eps(X):
return -(3.40118361661000*X[0]*cos(1.30*X[0]*X[1]/pi)/pi + 3.40118361661000*X[1]*cos(1.30*X[0]*X[1]/pi)/pi - 2.16374674986000*cos(1.70*X[0]) + 1.93040151213000*sin(2.10*X[1]))*(0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)*tanh((0.707106781000000*X[1]*sin(X[0]) + 0.707106781000000*cos(X[0]) + 0.424264068600000*sin(X[1]) + 2.12132034300000)/((X[1]*sin(X[0]))**2.00 - (0.707106781000000*X[1]*sin(X[0]) + 0.707106781000000*cos(X[0]) + 0.424264068600000*sin(X[1]) + 2.12132034300000)**2.00 + (cos(X[0]) + 0.600*sin(X[1]) + 3.00)**2.00)**0.500)
def EV(X):
return (0.900*cos(0.600*X[0]) + 0.400*cos(0.800*X[0]*X[1]/pi) + 0.600*sin(0.700*X[1]) + 0.900)**2/(4.30*cos(0.800*X[1]) - 3.80*sin(0.700*X[0]) + 1.70*sin(0.600*X[0]*X[1]/pi) + 8.20)
def velocity(X):
return [u(X), v(X)]
def forcing_velocity(X):
return [forcing_u(X), forcing_v(X)]
def C3(X):
return tanh((0.707106781000000*X[1]*sin(X[0]) + 0.707106781000000*cos(X[0]) + 0.424264068600000*sin(X[1]) + 2.12132034300000)/((X[1]*sin(X[0]))**2.00 - (0.707106781000000*X[1]*sin(X[0]) + 0.707106781000000*cos(X[0]) + 0.424264068600000*sin(X[1]) + 2.12132034300000)**2.00 + (cos(X[0]) + 0.600*sin(X[1]) + 3.00)**2.00)**0.500)
def u_xy(X):
return ((X[1]*sin(X[0]))**2.00 - (0.707106781000000*X[1]*sin(X[0]) + 0.707106781000000*cos(X[0]) + 0.424264068600000*sin(X[1]) + 2.12132034300000)**2.00 + (cos(X[0]) + 0.600*sin(X[1]) + 3.00)**2.00)**0.500
| lgpl-2.1 |
marco-lancini/Showcase | django/forms/fields.py | 49 | 38087 | """
Field classes.
"""
import datetime
import os
import re
import time
import urlparse
import warnings
from decimal import Decimal, DecimalException
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.core.exceptions import ValidationError
from django.core import validators
import django.utils.copycompat as copy
from django.utils import formats
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, smart_str
from django.utils.functional import lazy
# Provide this import for backwards compatibility.
from django.core.validators import EMPTY_VALUES
from util import ErrorList
from widgets import (TextInput, PasswordInput, HiddenInput,
MultipleHiddenInput, ClearableFileInput, CheckboxInput, Select,
NullBooleanSelect, SelectMultiple, DateInput, DateTimeInput, TimeInput,
SplitDateTimeWidget, SplitHiddenDateTimeWidget, FILE_INPUT_CONTRADICTION)
__all__ = (
'Field', 'CharField', 'IntegerField',
'DateField', 'TimeField', 'DateTimeField',
'RegexField', 'EmailField', 'FileField', 'ImageField', 'URLField',
'BooleanField', 'NullBooleanField', 'ChoiceField', 'MultipleChoiceField',
'ComboField', 'MultiValueField', 'FloatField', 'DecimalField',
'SplitDateTimeField', 'IPAddressField', 'FilePathField', 'SlugField',
'TypedChoiceField', 'TypedMultipleChoiceField'
)
def en_format(name):
"""
Helper function to stay backward compatible.
"""
from django.conf.locale.en import formats
warnings.warn(
"`django.forms.fields.DEFAULT_%s` is deprecated; use `django.utils.formats.get_format('%s')` instead." % (name, name),
DeprecationWarning
)
return getattr(formats, name)
class Field(object):
widget = TextInput # Default widget to use when rendering this type of Field.
hidden_widget = HiddenInput # Default widget to use when rendering this as "hidden".
default_validators = [] # Default set of validators
default_error_messages = {
'required': _(u'This field is required.'),
'invalid': _(u'Enter a valid value.'),
}
# Tracks each time a Field instance is created. Used to retain order.
creation_counter = 0
def __init__(self, required=True, widget=None, label=None, initial=None,
help_text=None, error_messages=None, show_hidden_initial=False,
validators=[], localize=False):
# required -- Boolean that specifies whether the field is required.
# True by default.
# widget -- A Widget class, or instance of a Widget class, that should
# be used for this Field when displaying it. Each Field has a
# default Widget that it'll use if you don't specify this. In
# most cases, the default widget is TextInput.
# label -- A verbose name for this field, for use in displaying this
# field in a form. By default, Django will use a "pretty"
# version of the form field name, if the Field is part of a
# Form.
# initial -- A value to use in this Field's initial display. This value
# is *not* used as a fallback if data isn't given.
# help_text -- An optional string to use as "help text" for this Field.
# error_messages -- An optional dictionary to override the default
# messages that the field will raise.
# show_hidden_initial -- Boolean that specifies if it is needed to render a
# hidden widget with initial value after widget.
# validators -- List of additional validators to use
# localize -- Boolean that specifies if the field should be localized.
if label is not None:
label = smart_unicode(label)
self.required, self.label, self.initial = required, label, initial
self.show_hidden_initial = show_hidden_initial
if help_text is None:
self.help_text = u''
else:
self.help_text = smart_unicode(help_text)
widget = widget or self.widget
if isinstance(widget, type):
widget = widget()
# Trigger the localization machinery if needed.
self.localize = localize
if self.localize:
widget.is_localized = True
# Let the widget know whether it should display as required.
widget.is_required = self.required
# Hook into self.widget_attrs() for any Field-specific HTML attributes.
extra_attrs = self.widget_attrs(widget)
if extra_attrs:
widget.attrs.update(extra_attrs)
self.widget = widget
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
self.validators = self.default_validators + validators
def prepare_value(self, value):
return value
def to_python(self, value):
return value
def validate(self, value):
if value in validators.EMPTY_VALUES and self.required:
raise ValidationError(self.error_messages['required'])
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except ValidationError, e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise ValidationError(errors)
def clean(self, value):
"""
Validates the given value and returns its "cleaned" value as an
appropriate Python object.
Raises ValidationError for any errors.
"""
value = self.to_python(value)
self.validate(value)
self.run_validators(value)
return value
def bound_data(self, data, initial):
"""
Return the value that should be shown for this field on render of a
bound form, given the submitted POST data for the field and the initial
data, if any.
For most fields, this will simply be data; FileFields need to handle it
a bit differently.
"""
return data
def widget_attrs(self, widget):
"""
Given a Widget instance (*not* a Widget class), returns a dictionary of
any HTML attributes that should be added to the Widget, based on this
Field.
"""
return {}
def __deepcopy__(self, memo):
result = copy.copy(self)
memo[id(self)] = result
result.widget = copy.deepcopy(self.widget, memo)
return result
class CharField(Field):
def __init__(self, max_length=None, min_length=None, *args, **kwargs):
self.max_length, self.min_length = max_length, min_length
super(CharField, self).__init__(*args, **kwargs)
if min_length is not None:
self.validators.append(validators.MinLengthValidator(min_length))
if max_length is not None:
self.validators.append(validators.MaxLengthValidator(max_length))
def to_python(self, value):
"Returns a Unicode object."
if value in validators.EMPTY_VALUES:
return u''
return smart_unicode(value)
def widget_attrs(self, widget):
if self.max_length is not None and isinstance(widget, (TextInput, PasswordInput)):
# The HTML attribute is maxlength, not max_length.
return {'maxlength': str(self.max_length)}
class IntegerField(Field):
default_error_messages = {
'invalid': _(u'Enter a whole number.'),
'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
}
def __init__(self, max_value=None, min_value=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
super(IntegerField, self).__init__(*args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that int() can be called on the input. Returns the result
of int(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in validators.EMPTY_VALUES:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = int(str(value))
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'])
return value
class FloatField(IntegerField):
default_error_messages = {
'invalid': _(u'Enter a number.'),
}
def to_python(self, value):
"""
Validates that float() can be called on the input. Returns the result
of float(). Returns None for empty values.
"""
value = super(IntegerField, self).to_python(value)
if value in validators.EMPTY_VALUES:
return None
if self.localize:
value = formats.sanitize_separators(value)
try:
value = float(value)
except (ValueError, TypeError):
raise ValidationError(self.error_messages['invalid'])
return value
class DecimalField(Field):
default_error_messages = {
'invalid': _(u'Enter a number.'),
'max_value': _(u'Ensure this value is less than or equal to %(limit_value)s.'),
'min_value': _(u'Ensure this value is greater than or equal to %(limit_value)s.'),
'max_digits': _('Ensure that there are no more than %s digits in total.'),
'max_decimal_places': _('Ensure that there are no more than %s decimal places.'),
'max_whole_digits': _('Ensure that there are no more than %s digits before the decimal point.')
}
def __init__(self, max_value=None, min_value=None, max_digits=None, decimal_places=None, *args, **kwargs):
self.max_value, self.min_value = max_value, min_value
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, *args, **kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
def to_python(self, value):
"""
Validates that the input is a decimal number. Returns a Decimal
instance. Returns None for empty values. Ensures that there are no more
than max_digits in the number, and no more than decimal_places digits
after the decimal point.
"""
if value in validators.EMPTY_VALUES:
return None
if self.localize:
value = formats.sanitize_separators(value)
value = smart_str(value).strip()
try:
value = Decimal(value)
except DecimalException:
raise ValidationError(self.error_messages['invalid'])
return value
def validate(self, value):
super(DecimalField, self).validate(value)
if value in validators.EMPTY_VALUES:
return
# Check for NaN, Inf and -Inf values. We can't compare directly for NaN,
# since it is never equal to itself. However, NaN is the only value that
# isn't equal to itself, so we can use this to identify NaN
if value != value or value == Decimal("Inf") or value == Decimal("-Inf"):
raise ValidationError(self.error_messages['invalid'])
sign, digittuple, exponent = value.as_tuple()
decimals = abs(exponent)
# digittuple doesn't include any leading zeros.
digits = len(digittuple)
if decimals > digits:
# We have leading zeros up to or past the decimal point. Count
# everything past the decimal point as a digit. We do not count
# 0 before the decimal point as a digit since that would mean
# we would not allow max_digits = decimal_places.
digits = decimals
whole_digits = digits - decimals
if self.max_digits is not None and digits > self.max_digits:
raise ValidationError(self.error_messages['max_digits'] % self.max_digits)
if self.decimal_places is not None and decimals > self.decimal_places:
raise ValidationError(self.error_messages['max_decimal_places'] % self.decimal_places)
if self.max_digits is not None and self.decimal_places is not None and whole_digits > (self.max_digits - self.decimal_places):
raise ValidationError(self.error_messages['max_whole_digits'] % (self.max_digits - self.decimal_places))
return value
class DateField(Field):
widget = DateInput
default_error_messages = {
'invalid': _(u'Enter a valid date.'),
}
def __init__(self, input_formats=None, *args, **kwargs):
super(DateField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def to_python(self, value):
"""
Validates that the input can be converted to a date. Returns a Python
datetime.date object.
"""
if value in validators.EMPTY_VALUES:
return None
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
for format in self.input_formats or formats.get_format('DATE_INPUT_FORMATS'):
try:
return datetime.date(*time.strptime(value, format)[:3])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
class TimeField(Field):
widget = TimeInput
default_error_messages = {
'invalid': _(u'Enter a valid time.')
}
def __init__(self, input_formats=None, *args, **kwargs):
super(TimeField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def to_python(self, value):
"""
Validates that the input can be converted to a time. Returns a Python
datetime.time object.
"""
if value in validators.EMPTY_VALUES:
return None
if isinstance(value, datetime.time):
return value
for format in self.input_formats or formats.get_format('TIME_INPUT_FORMATS'):
try:
return datetime.time(*time.strptime(value, format)[3:6])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
class DateTimeField(Field):
widget = DateTimeInput
default_error_messages = {
'invalid': _(u'Enter a valid date/time.'),
}
def __init__(self, input_formats=None, *args, **kwargs):
super(DateTimeField, self).__init__(*args, **kwargs)
self.input_formats = input_formats
def to_python(self, value):
"""
Validates that the input can be converted to a datetime. Returns a
Python datetime.datetime object.
"""
if value in validators.EMPTY_VALUES:
return None
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
if isinstance(value, list):
# Input comes from a SplitDateTimeWidget, for example. So, it's two
# components: date and time.
if len(value) != 2:
raise ValidationError(self.error_messages['invalid'])
if value[0] in validators.EMPTY_VALUES and value[1] in validators.EMPTY_VALUES:
return None
value = '%s %s' % tuple(value)
for format in self.input_formats or formats.get_format('DATETIME_INPUT_FORMATS'):
try:
return datetime.datetime(*time.strptime(value, format)[:6])
except ValueError:
continue
raise ValidationError(self.error_messages['invalid'])
class RegexField(CharField):
def __init__(self, regex, max_length=None, min_length=None, error_message=None, *args, **kwargs):
"""
regex can be either a string or a compiled regular expression object.
error_message is an optional error message to use, if
'Enter a valid value' is too generic for you.
"""
# error_message is just kept for backwards compatibility:
if error_message:
error_messages = kwargs.get('error_messages') or {}
error_messages['invalid'] = error_message
kwargs['error_messages'] = error_messages
super(RegexField, self).__init__(max_length, min_length, *args, **kwargs)
if isinstance(regex, basestring):
regex = re.compile(regex)
self.regex = regex
self.validators.append(validators.RegexValidator(regex=regex))
class EmailField(CharField):
default_error_messages = {
'invalid': _(u'Enter a valid e-mail address.'),
}
default_validators = [validators.validate_email]
def clean(self, value):
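# Strip surrounding whitespace before running the e-mail validators.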
value = self.to_python(value).strip()
return super(EmailField, self).clean(value)
class FileField(Field):
widget = ClearableFileInput
default_error_messages = {
'invalid': _(u"No file was submitted. Check the encoding type on the form."),
'missing': _(u"No file was submitted."),
'empty': _(u"The submitted file is empty."),
'max_length': _(u'Ensure this filename has at most %(max)d characters (it has %(length)d).'),
'contradiction': _(u'Please either submit a file or check the clear checkbox, not both.')
}
def __init__(self, *args, **kwargs):
self.max_length = kwargs.pop('max_length', None)
super(FileField, self).__init__(*args, **kwargs)
def to_python(self, data):
if data in validators.EMPTY_VALUES:
return None
# UploadedFile objects should have name and size attributes.
try:
file_name = data.name
file_size = data.size
except AttributeError:
raise ValidationError(self.error_messages['invalid'])
if self.max_length is not None and len(file_name) > self.max_length:
error_values = {'max': self.max_length, 'length': len(file_name)}
raise ValidationError(self.error_messages['max_length'] % error_values)
if not file_name:
raise ValidationError(self.error_messages['invalid'])
if not file_size:
raise ValidationError(self.error_messages['empty'])
return data
def clean(self, data, initial=None):
# If the widget got contradictory inputs, we raise a validation error
if data is FILE_INPUT_CONTRADICTION:
raise ValidationError(self.error_messages['contradiction'])
# False means the field value should be cleared; further validation is
# not needed.
if data is False:
if not self.required:
return False
# If the field is required, clearing is not possible (the widget
# shouldn't return False data in that case anyway). False is not
# in validators.EMPTY_VALUES; if a False value makes it this far
# it should be validated from here on out as None (so it will be
# caught by the required check).
data = None
if not data and initial:
return initial
return super(FileField, self).clean(data)
def bound_data(self, data, initial):
if data in (None, FILE_INPUT_CONTRADICTION):
return initial
return data
class ImageField(FileField):
default_error_messages = {
'invalid_image': _(u"Upload a valid image. The file you uploaded was either not an image or a corrupted image."),
}
def to_python(self, data):
"""
Checks that the file-upload field data contains a valid image (GIF, JPG,
PNG, possibly others -- whatever the Python Imaging Library supports).
"""
f = super(ImageField, self).to_python(data)
if f is None:
return None
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image
except ImportError:
import Image
# We need to get a file object for PIL. We might have a path or we might
# have to read the data into memory.
if hasattr(data, 'temporary_file_path'):
file = data.temporary_file_path()
else:
if hasattr(data, 'read'):
file = StringIO(data.read())
else:
file = StringIO(data['content'])
try:
# load() is the only method that can spot a truncated JPEG,
# but it cannot be called sanely after verify()
trial_image = Image.open(file)
trial_image.load()
# Since we're about to use the file again we have to reset the
# file object if possible.
if hasattr(file, 'reset'):
file.reset()
# verify() is the only method that can spot a corrupt PNG,
# but it must be called immediately after the constructor
trial_image = Image.open(file)
trial_image.verify()
except ImportError:
# Under PyPy, it is possible to import PIL. However, the underlying
# _imaging C module isn't available, so an ImportError will be
# raised. Catch and re-raise.
raise
except Exception: # Python Imaging Library doesn't recognize it as an image
raise ValidationError(self.error_messages['invalid_image'])
if hasattr(f, 'seek') and callable(f.seek):
f.seek(0)
return f
class URLField(CharField):
default_error_messages = {
'invalid': _(u'Enter a valid URL.'),
'invalid_link': _(u'This URL appears to be a broken link.'),
}
def __init__(self, max_length=None, min_length=None, verify_exists=False,
validator_user_agent=validators.URL_VALIDATOR_USER_AGENT, *args, **kwargs):
super(URLField, self).__init__(max_length, min_length, *args,
**kwargs)
self.validators.append(validators.URLValidator(verify_exists=verify_exists, validator_user_agent=validator_user_agent))
def to_python(self, value):
if value:
url_fields = list(urlparse.urlsplit(value))
if not url_fields[0]:
# If no URL scheme given, assume http://
url_fields[0] = 'http'
if not url_fields[1]:
# Assume that if no domain is provided, that the path segment
# contains the domain.
url_fields[1] = url_fields[2]
url_fields[2] = ''
# Rebuild the url_fields list, since the domain segment may now
# contain the path too.
value = urlparse.urlunsplit(url_fields)
url_fields = list(urlparse.urlsplit(value))
if not url_fields[2]:
# the path portion may need to be added before query params
url_fields[2] = '/'
value = urlparse.urlunsplit(url_fields)
return super(URLField, self).to_python(value)
class BooleanField(Field):
widget = CheckboxInput
def to_python(self, value):
"""Returns a Python boolean object."""
# Explicitly check for the string 'False', which is what a hidden field
# will submit for False. Also check for '0', since this is what
# RadioSelect will provide. Because bool("True") == bool('1') == True,
# we don't need to handle that explicitly.
if value in ('False', '0'):
value = False
else:
value = bool(value)
value = super(BooleanField, self).to_python(value)
if not value and self.required:
raise ValidationError(self.error_messages['required'])
return value
class NullBooleanField(BooleanField):
"""
A field whose valid values are None, True and False. Invalid values are
cleaned to None.
"""
widget = NullBooleanSelect
def to_python(self, value):
"""
Explicitly checks for the string 'True' and 'False', which is what a
hidden field will submit for True and False, and for '1' and '0', which
is what a RadioField will submit. Unlike BooleanField, we need to
explicitly check for True, because we are not using the bool() function.
"""
if value in (True, 'True', '1'):
return True
elif value in (False, 'False', '0'):
return False
else:
return None
def validate(self, value):
pass
class ChoiceField(Field):
widget = Select
default_error_messages = {
'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
}
def __init__(self, choices=(), required=True, widget=None, label=None,
initial=None, help_text=None, *args, **kwargs):
super(ChoiceField, self).__init__(required=required, widget=widget, label=label,
initial=initial, help_text=help_text, *args, **kwargs)
self.choices = choices
def _get_choices(self):
return self._choices
def _set_choices(self, value):
# Setting choices also sets the choices on the widget.
# choices can be any iterable, but we call list() on it because
# it will be consumed more than once.
self._choices = self.widget.choices = list(value)
choices = property(_get_choices, _set_choices)
def to_python(self, value):
"Returns a Unicode object."
if value in validators.EMPTY_VALUES:
return u''
return smart_unicode(value)
def validate(self, value):
"""
Validates that the input is in self.choices.
"""
super(ChoiceField, self).validate(value)
if value and not self.valid_value(value):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
def valid_value(self, value):
"Check to see if the provided value is a valid choice"
for k, v in self.choices:
if isinstance(v, (list, tuple)):
# This is an optgroup, so look inside the group for options
for k2, v2 in v:
if value == smart_unicode(k2):
return True
else:
if value == smart_unicode(k):
return True
return False
class TypedChoiceField(ChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', '')
super(TypedChoiceField, self).__init__(*args, **kwargs)
def to_python(self, value):
"""
Validates that the value is in self.choices and can be coerced to the
right type.
"""
value = super(TypedChoiceField, self).to_python(value)
super(TypedChoiceField, self).validate(value)
if value == self.empty_value or value in validators.EMPTY_VALUES:
return self.empty_value
try:
value = self.coerce(value)
except (ValueError, TypeError, ValidationError):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': value})
return value
def validate(self, value):
pass
class MultipleChoiceField(ChoiceField):
hidden_widget = MultipleHiddenInput
widget = SelectMultiple
default_error_messages = {
'invalid_choice': _(u'Select a valid choice. %(value)s is not one of the available choices.'),
'invalid_list': _(u'Enter a list of values.'),
}
def to_python(self, value):
if not value:
return []
elif not isinstance(value, (list, tuple)):
raise ValidationError(self.error_messages['invalid_list'])
return [smart_unicode(val) for val in value]
def validate(self, value):
"""
Validates that the input is a list or tuple.
"""
if self.required and not value:
raise ValidationError(self.error_messages['required'])
# Validate that each value in the value list is in self.choices.
for val in value:
if not self.valid_value(val):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': val})
class TypedMultipleChoiceField(MultipleChoiceField):
def __init__(self, *args, **kwargs):
self.coerce = kwargs.pop('coerce', lambda val: val)
self.empty_value = kwargs.pop('empty_value', [])
super(TypedMultipleChoiceField, self).__init__(*args, **kwargs)
def to_python(self, value):
"""
Validates that the values are in self.choices and can be coerced to the
right type.
"""
value = super(TypedMultipleChoiceField, self).to_python(value)
super(TypedMultipleChoiceField, self).validate(value)
if value == self.empty_value or value in validators.EMPTY_VALUES:
return self.empty_value
new_value = []
for choice in value:
try:
new_value.append(self.coerce(choice))
except (ValueError, TypeError, ValidationError):
raise ValidationError(self.error_messages['invalid_choice'] % {'value': choice})
return new_value
def validate(self, value):
pass
class ComboField(Field):
"""
A Field whose clean() method calls multiple Field clean() methods.
"""
def __init__(self, fields=(), *args, **kwargs):
super(ComboField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by ComboField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def clean(self, value):
"""
Validates the given value against all of self.fields, which is a
list of Field instances.
"""
super(ComboField, self).clean(value)
for field in self.fields:
value = field.clean(value)
return value
class MultiValueField(Field):
"""
A Field that aggregates the logic of multiple Fields.
Its clean() method takes a "decompressed" list of values, which are then
cleaned into a single value according to self.fields. Each value in
this list is cleaned by the corresponding field -- the first value is
cleaned by the first field, the second value is cleaned by the second
field, etc. Once all fields are cleaned, the list of clean values is
"compressed" into a single value.
Subclasses should not have to implement clean(). Instead, they must
implement compress(), which takes a list of valid values and returns a
"compressed" version of those values -- a single value.
You'll probably want to use this with MultiWidget.
"""
default_error_messages = {
'invalid': _(u'Enter a list of values.'),
}
def __init__(self, fields=(), *args, **kwargs):
super(MultiValueField, self).__init__(*args, **kwargs)
# Set 'required' to False on the individual fields, because the
# required validation will be handled by MultiValueField, not by those
# individual fields.
for f in fields:
f.required = False
self.fields = fields
def validate(self, value):
pass
def clean(self, value):
"""
Validates every value in the given list. A value is validated against
the corresponding Field in self.fields.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), clean() would call
DateField.clean(value[0]) and TimeField.clean(value[1]).
"""
clean_data = []
errors = ErrorList()
if not value or isinstance(value, (list, tuple)):
if not value or not [v for v in value if v not in validators.EMPTY_VALUES]:
if self.required:
raise ValidationError(self.error_messages['required'])
else:
return self.compress([])
else:
raise ValidationError(self.error_messages['invalid'])
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
if self.required and field_value in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['required'])
try:
clean_data.append(field.clean(field_value))
except ValidationError, e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter.
errors.extend(e.messages)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
return out
def compress(self, data_list):
"""
Returns a single value for the given list of values. The values can be
assumed to be valid.
For example, if this MultiValueField was instantiated with
fields=(DateField(), TimeField()), this might return a datetime
object created by combining the date and time in data_list.
"""
raise NotImplementedError('Subclasses must implement this method.')
class FilePathField(ChoiceField):
def __init__(self, path, match=None, recursive=False, required=True,
widget=None, label=None, initial=None, help_text=None,
*args, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
super(FilePathField, self).__init__(choices=(), required=required,
widget=widget, label=label, initial=initial, help_text=help_text,
*args, **kwargs)
if self.required:
self.choices = []
else:
self.choices = [("", "---------")]
if self.match is not None:
self.match_re = re.compile(self.match)
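# Build the list of selectable files, walking subdirectories when recursive=True and filtering on the match pattern.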
if recursive:
for root, dirs, files in sorted(os.walk(self.path)):
for f in files:
if self.match is None or self.match_re.search(f):
f = os.path.join(root, f)
self.choices.append((f, f.replace(path, "", 1)))
else:
try:
for f in sorted(os.listdir(self.path)):
full_file = os.path.join(self.path, f)
if os.path.isfile(full_file) and (self.match is None or self.match_re.search(f)):
self.choices.append((full_file, f))
except OSError:
pass
self.widget.choices = self.choices
class SplitDateTimeField(MultiValueField):
widget = SplitDateTimeWidget
hidden_widget = SplitHiddenDateTimeWidget
default_error_messages = {
'invalid_date': _(u'Enter a valid date.'),
'invalid_time': _(u'Enter a valid time.'),
}
def __init__(self, input_date_formats=None, input_time_formats=None, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
localize = kwargs.get('localize', False)
fields = (
DateField(input_formats=input_date_formats,
error_messages={'invalid': errors['invalid_date']},
localize=localize),
TimeField(input_formats=input_time_formats,
error_messages={'invalid': errors['invalid_time']},
localize=localize),
)
super(SplitDateTimeField, self).__init__(fields, *args, **kwargs)
def compress(self, data_list):
if data_list:
# Raise a validation error if time or date is empty
# (possible if SplitDateTimeField has required=False).
if data_list[0] in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['invalid_date'])
if data_list[1] in validators.EMPTY_VALUES:
raise ValidationError(self.error_messages['invalid_time'])
return datetime.datetime.combine(*data_list)
return None
class IPAddressField(CharField):
default_error_messages = {
'invalid': _(u'Enter a valid IPv4 address.'),
}
default_validators = [validators.validate_ipv4_address]
class SlugField(CharField):
default_error_messages = {
'invalid': _(u"Enter a valid 'slug' consisting of letters, numbers,"
u" underscores or hyphens."),
}
default_validators = [validators.validate_slug]
| mit |
libscie/liberator | liberator/lib/python3.6/site-packages/django/core/checks/security/sessions.py | 51 | 2782 | from django.conf import settings
from .. import Tags, Warning, register
from ..utils import patch_middleware_message
def add_session_cookie_message(message):
    return message + (
        " Using a secure-only session cookie makes it more difficult for "
        "network traffic sniffers to hijack user sessions."
    )
W010 = Warning(
add_session_cookie_message(
"You have 'django.contrib.sessions' in your INSTALLED_APPS, "
"but you have not set SESSION_COOKIE_SECURE to True."
),
id='security.W010',
)
W011 = Warning(
add_session_cookie_message(
"You have 'django.contrib.sessions.middleware.SessionMiddleware' "
"in your MIDDLEWARE, but you have not set "
"SESSION_COOKIE_SECURE to True."
),
id='security.W011',
)
W012 = Warning(
add_session_cookie_message("SESSION_COOKIE_SECURE is not set to True."),
id='security.W012',
)
def add_httponly_message(message):
    return message + (
        " Using an HttpOnly session cookie makes it more difficult for "
        "cross-site scripting attacks to hijack user sessions."
    )
W013 = Warning(
add_httponly_message(
"You have 'django.contrib.sessions' in your INSTALLED_APPS, "
"but you have not set SESSION_COOKIE_HTTPONLY to True.",
),
id='security.W013',
)
W014 = Warning(
add_httponly_message(
"You have 'django.contrib.sessions.middleware.SessionMiddleware' "
"in your MIDDLEWARE, but you have not set "
"SESSION_COOKIE_HTTPONLY to True."
),
id='security.W014',
)
W015 = Warning(
add_httponly_message("SESSION_COOKIE_HTTPONLY is not set to True."),
id='security.W015',
)
@register(Tags.security, deploy=True)
def check_session_cookie_secure(app_configs, **kwargs):
    errors = []
    if not settings.SESSION_COOKIE_SECURE:
        if _session_app():
            errors.append(W010)
        if _session_middleware():
            errors.append(patch_middleware_message(W011))
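        # If both the app and the middleware warnings apply, collapse them into the single generic W012 warning.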
        if len(errors) > 1:
            errors = [W012]
    return errors
@register(Tags.security, deploy=True)
def check_session_cookie_httponly(app_configs, **kwargs):
    errors = []
    if not settings.SESSION_COOKIE_HTTPONLY:
        if _session_app():
            errors.append(W013)
        if _session_middleware():
            errors.append(patch_middleware_message(W014))
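        # As above, collapse both warnings into the single generic W015 warning when they both apply.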
        if len(errors) > 1:
            errors = [W015]
    return errors
def _session_middleware():
return ("django.contrib.sessions.middleware.SessionMiddleware" in settings.MIDDLEWARE_CLASSES or
settings.MIDDLEWARE and "django.contrib.sessions.middleware.SessionMiddleware" in settings.MIDDLEWARE)
def _session_app():
return "django.contrib.sessions" in settings.INSTALLED_APPS
| cc0-1.0 |
meletakis/collato | lib/python2.7/site-packages/autocomplete_light/example_apps/basic/forms.py | 2 | 1045 | from django import VERSION
try:
import genericm2m
except ImportError:
genericm2m = None
try:
import taggit
except ImportError:
taggit = None
import autocomplete_light
autocomplete_light.autodiscover()
from .models import *
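# On Django >= 1.6, ModelForm expects Meta to declare `fields`; this mixin supplies fields = '__all__' for compatibility.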
class DjangoCompatMeta:
    if VERSION >= (1, 6):
        fields = '__all__'


class FkModelForm(autocomplete_light.ModelForm):
    class Meta(DjangoCompatMeta):
        model = FkModel


class OtoModelForm(autocomplete_light.ModelForm):
    class Meta(DjangoCompatMeta):
        model = OtoModel


class MtmModelForm(autocomplete_light.ModelForm):
    class Meta(DjangoCompatMeta):
        model = MtmModel


class GfkModelForm(autocomplete_light.ModelForm):
    class Meta(DjangoCompatMeta):
        model = GfkModel


if genericm2m:
    class GmtmModelForm(autocomplete_light.ModelForm):
        class Meta(DjangoCompatMeta):
            model = GmtmModel


if taggit:
    class TaggitModelForm(autocomplete_light.ModelForm):
        class Meta(DjangoCompatMeta):
            model = TaggitModel
| gpl-2.0 |
lutrellja15/gir_app_labs_at_aamu | models/places.py | 2 | 1242 | # Copyright 2017 The GiR @ AAMU Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import the "new" AppEngine Database module.
from google.appengine.ext import ndb
class Place(ndb.Model):
"""Stores location of event.
For example: a location of event that a group voted on.
Properties:
physical_location: Where group decides to meet.
when_open: Hrs location is open, JSON-Encoded, like:
"{ # for each day of the week...
'Monday': [
('09:00', '17:00'), # open/close
],
etc.
}"
parking: Is parking available?
outdoors: Outdoors or not?
name: TBD
"""
physical_location = ndb.GeoPtProperty()
when_open = ndb.JsonProperty()
outdoors = ndb.BooleanProperty()
| apache-2.0 |
eevee/pelican | pelican/tests/test_generators.py | 12 | 31166 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from codecs import open
try:
from unittest.mock import MagicMock
except ImportError:
try:
from mock import MagicMock
except ImportError:
MagicMock = False
from shutil import rmtree
from tempfile import mkdtemp
from pelican.generators import (Generator, ArticlesGenerator, PagesGenerator,
StaticGenerator, TemplatePagesGenerator)
from pelican.writers import Writer
from pelican.tests.support import unittest, get_settings
import locale
CUR_DIR = os.path.dirname(__file__)
CONTENT_DIR = os.path.join(CUR_DIR, 'content')
class TestGenerator(unittest.TestCase):
def setUp(self):
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C'))
self.settings = get_settings()
self.settings['READERS'] = {'asc': None}
self.generator = Generator(self.settings.copy(), self.settings,
CUR_DIR, self.settings['THEME'], None)
def tearDown(self):
locale.setlocale(locale.LC_ALL, self.old_locale)
def test_include_path(self):
self.settings['IGNORE_FILES'] = {'ignored1.rst', 'ignored2.rst'}
filename = os.path.join(CUR_DIR, 'content', 'article.rst')
include_path = self.generator._include_path
self.assertTrue(include_path(filename))
self.assertTrue(include_path(filename, extensions=('rst',)))
self.assertFalse(include_path(filename, extensions=('md',)))
ignored_file = os.path.join(CUR_DIR, 'content', 'ignored1.rst')
self.assertFalse(include_path(ignored_file))
def test_get_files_exclude(self):
"""Test that Generator.get_files() properly excludes directories.
"""
# We use our own Generator so we can give it our own content path
generator = Generator(context=self.settings.copy(),
settings=self.settings,
path=os.path.join(CUR_DIR, 'nested_content'),
theme=self.settings['THEME'], output_path=None)
filepaths = generator.get_files(paths=['maindir'])
found_files = {os.path.basename(f) for f in filepaths}
expected_files = {'maindir.md', 'subdir.md'}
self.assertFalse(expected_files - found_files,
"get_files() failed to find one or more files")
# Test string as `paths` argument rather than list
filepaths = generator.get_files(paths='maindir')
found_files = {os.path.basename(f) for f in filepaths}
expected_files = {'maindir.md', 'subdir.md'}
self.assertFalse(expected_files - found_files,
"get_files() failed to find one or more files")
filepaths = generator.get_files(paths=[''], exclude=['maindir'])
found_files = {os.path.basename(f) for f in filepaths}
self.assertNotIn('maindir.md', found_files,
"get_files() failed to exclude a top-level directory")
self.assertNotIn('subdir.md', found_files,
"get_files() failed to exclude a subdir of an excluded directory")
filepaths = generator.get_files(paths=[''],
exclude=[os.path.join('maindir', 'subdir')])
found_files = {os.path.basename(f) for f in filepaths}
self.assertNotIn('subdir.md', found_files,
"get_files() failed to exclude a subdirectory")
filepaths = generator.get_files(paths=[''], exclude=['subdir'])
found_files = {os.path.basename(f) for f in filepaths}
self.assertIn('subdir.md', found_files,
"get_files() excluded a subdirectory by name, ignoring its path")
class TestArticlesGenerator(unittest.TestCase):
@classmethod
def setUpClass(cls):
settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['READERS'] = {'asc': None}
settings['CACHE_CONTENT'] = False  # cache not needed for these logic tests
cls.generator = ArticlesGenerator(
context=settings.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
cls.generator.generate_context()
cls.articles = cls.distill_articles(cls.generator.articles)
def setUp(self):
self.temp_cache = mkdtemp(prefix='pelican_cache.')
def tearDown(self):
rmtree(self.temp_cache)
@staticmethod
def distill_articles(articles):
return [[article.title, article.status, article.category.name,
article.template] for article in articles]
@unittest.skipUnless(MagicMock, 'Needs Mock module')
def test_generate_feeds(self):
settings = get_settings()
settings['CACHE_PATH'] = self.temp_cache
generator = ArticlesGenerator(
context=settings, settings=settings,
path=None, theme=settings['THEME'], output_path=None)
writer = MagicMock()
generator.generate_feeds(writer)
writer.write_feed.assert_called_with([], settings,
'feeds/all.atom.xml')
generator = ArticlesGenerator(
context=settings, settings=get_settings(FEED_ALL_ATOM=None),
path=None, theme=settings['THEME'], output_path=None)
writer = MagicMock()
generator.generate_feeds(writer)
self.assertFalse(writer.write_feed.called)
def test_generate_context(self):
articles_expected = [
['Article title', 'published', 'Default', 'article'],
['Article with markdown and summary metadata multi', 'published',
'Default', 'article'],
['Article with markdown and summary metadata single', 'published',
'Default', 'article'],
['Article with markdown containing footnotes', 'published',
'Default', 'article'],
['Article with template', 'published', 'Default', 'custom'],
['Rst with filename metadata', 'published', 'yeah', 'article'],
['Test Markdown extensions', 'published', 'Default', 'article'],
['Test markdown File', 'published', 'test', 'article'],
['Test md File', 'published', 'test', 'article'],
['Test mdown File', 'published', 'test', 'article'],
['Test mkd File', 'published', 'test', 'article'],
['This is a super article !', 'published', 'Yeah', 'article'],
['This is a super article !', 'published', 'Yeah', 'article'],
['Article with Nonconformant HTML meta tags', 'published', 'Default', 'article'],
['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'yeah', 'article'],
['This is a super article !', 'published', 'Default', 'article'],
['This is an article with category !', 'published', 'yeah',
'article'],
['This is an article with multiple authors!', 'published', 'Default', 'article'],
['This is an article with multiple authors!', 'published', 'Default', 'article'],
['This is an article with multiple authors in list format!', 'published', 'Default', 'article'],
['This is an article with multiple authors in lastname, firstname format!', 'published', 'Default', 'article'],
['This is an article without category !', 'published', 'Default',
'article'],
['This is an article without category !', 'published',
'TestCategory', 'article'],
['An Article With Code Block To Test Typogrify Ignore',
'published', 'Default', 'article'],
['マックOS X 10.8でパイソンとVirtualenvをインストールと設定', 'published',
'指導書', 'article'],
]
self.assertEqual(sorted(articles_expected), sorted(self.articles))
def test_generate_categories(self):
# test for name
# categories are grouped by slug; if two categories have the same slug
# but different names they will be grouped together, the first one in
# terms of process order will define the name for that category
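        # Illustrative (hypothetical) example: articles filed under "Yeah" and
        # "yeah" both slugify to "yeah", so they land in a single category
        # whose displayed name depends on processing order, which is why two
        # alternative name lists are accepted below.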
categories = [cat.name for cat, _ in self.generator.categories]
categories_alternatives = (
sorted(['Default', 'TestCategory', 'Yeah', 'test', '指導書']),
sorted(['Default', 'TestCategory', 'yeah', 'test', '指導書']),
)
self.assertIn(sorted(categories), categories_alternatives)
# test for slug
categories = [cat.slug for cat, _ in self.generator.categories]
categories_expected = ['default', 'testcategory', 'yeah', 'test',
'zhi-dao-shu']
self.assertEqual(sorted(categories), sorted(categories_expected))
def test_do_not_use_folder_as_category(self):
settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['USE_FOLDER_AS_CATEGORY'] = False
settings['CACHE_PATH'] = self.temp_cache
settings['READERS'] = {'asc': None}
settings['filenames'] = {}
generator = ArticlesGenerator(
context=settings.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
# test for name
# categories are grouped by slug; if two categories have the same slug
# but different names they will be grouped together, the first one in
# terms of process order will define the name for that category
categories = [cat.name for cat, _ in generator.categories]
categories_alternatives = (
sorted(['Default', 'Yeah', 'test', '指導書']),
sorted(['Default', 'yeah', 'test', '指導書']),
)
self.assertIn(sorted(categories), categories_alternatives)
# test for slug
categories = [cat.slug for cat, _ in generator.categories]
categories_expected = ['default', 'yeah', 'test', 'zhi-dao-shu']
self.assertEqual(sorted(categories), sorted(categories_expected))
@unittest.skipUnless(MagicMock, 'Needs Mock module')
def test_direct_templates_save_as_default(self):
settings = get_settings(filenames={})
settings['CACHE_PATH'] = self.temp_cache
generator = ArticlesGenerator(
context=settings, settings=settings,
path=None, theme=settings['THEME'], output_path=None)
write = MagicMock()
generator.generate_direct_templates(write)
write.assert_called_with("archives.html",
generator.get_template("archives"), settings,
blog=True, paginated={}, page_name='archives')
@unittest.skipUnless(MagicMock, 'Needs Mock module')
def test_direct_templates_save_as_modified(self):
settings = get_settings()
settings['DIRECT_TEMPLATES'] = ['archives']
settings['ARCHIVES_SAVE_AS'] = 'archives/index.html'
settings['CACHE_PATH'] = self.temp_cache
generator = ArticlesGenerator(
context=settings, settings=settings,
path=None, theme=settings['THEME'], output_path=None)
write = MagicMock()
generator.generate_direct_templates(write)
write.assert_called_with("archives/index.html",
generator.get_template("archives"), settings,
blog=True, paginated={},
page_name='archives/index')
@unittest.skipUnless(MagicMock, 'Needs Mock module')
def test_direct_templates_save_as_false(self):
settings = get_settings()
settings['DIRECT_TEMPLATES'] = ['archives']
settings['ARCHIVES_SAVE_AS'] = False
settings['CACHE_PATH'] = self.temp_cache
generator = ArticlesGenerator(
context=settings, settings=settings,
path=None, theme=settings['THEME'], output_path=None)
write = MagicMock()
generator.generate_direct_templates(write)
self.assertEqual(write.call_count, 0)
def test_per_article_template(self):
"""
Custom template articles get the field but standard/unset are None
"""
custom_template = ['Article with template', 'published', 'Default',
'custom']
standard_template = ['This is a super article !', 'published', 'Yeah',
'article']
self.assertIn(custom_template, self.articles)
self.assertIn(standard_template, self.articles)
@unittest.skipUnless(MagicMock, 'Needs Mock module')
def test_period_in_timeperiod_archive(self):
"""
        Test that the context passed to a generated period archive includes
        'period': a tuple of (year, month, day) appropriate to the time period
"""
settings = get_settings(filenames={})
settings['YEAR_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/index.html'
settings['CACHE_PATH'] = self.temp_cache
generator = ArticlesGenerator(
context=settings, settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
dates = [d for d in generator.dates if d.date.year == 1970]
self.assertEqual(len(dates), 1)
        # among other things, it must at least have been called with this
settings["period"] = (1970,)
write.assert_called_with("posts/1970/index.html",
generator.get_template("period_archives"),
settings,
blog=True, dates=dates)
del settings["period"]
settings['MONTH_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/{date:%b}/index.html'
generator = ArticlesGenerator(
context=settings, settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
dates = [d for d in generator.dates if d.date.year == 1970
and d.date.month == 1]
self.assertEqual(len(dates), 1)
settings["period"] = (1970, "January")
        # among other things, it must at least have been called with this
write.assert_called_with("posts/1970/Jan/index.html",
generator.get_template("period_archives"),
settings,
blog=True, dates=dates)
del settings["period"]
settings['DAY_ARCHIVE_SAVE_AS'] = 'posts/{date:%Y}/{date:%b}/{date:%d}/index.html'
generator = ArticlesGenerator(
context=settings, settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
write = MagicMock()
generator.generate_period_archives(write)
dates = [d for d in generator.dates if d.date.year == 1970
and d.date.month == 1
and d.date.day == 1]
self.assertEqual(len(dates), 1)
settings["period"] = (1970, "January", 1)
        # among other things, it must at least have been called with this
write.assert_called_with("posts/1970/Jan/01/index.html",
generator.get_template("period_archives"),
settings,
blog=True, dates=dates)
def test_nonexistent_template(self):
"""Attempt to load a non-existent template"""
settings = get_settings(filenames={})
generator = ArticlesGenerator(
context=settings, settings=settings,
path=None, theme=settings['THEME'], output_path=None)
self.assertRaises(Exception, generator.get_template, "not_a_template")
def test_generate_authors(self):
"""Check authors generation."""
authors = [author.name for author, _ in self.generator.authors]
authors_expected = sorted(['Alexis Métaireau', 'Author, First', 'Author, Second', 'First Author', 'Second Author'])
self.assertEqual(sorted(authors), authors_expected)
# test for slug
authors = [author.slug for author, _ in self.generator.authors]
authors_expected = ['alexis-metaireau', 'author-first', 'author-second', 'first-author', 'second-author']
self.assertEqual(sorted(authors), sorted(authors_expected))
def test_standard_metadata_in_default_metadata(self):
settings = get_settings(filenames={})
settings['CACHE_CONTENT'] = False
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
settings['DEFAULT_METADATA'] = (('author', 'Blogger'),
# category will be ignored in favor of
# DEFAULT_CATEGORY
('category', 'Random'),
('tags', 'general, untagged'))
generator = ArticlesGenerator(
context=settings.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
authors = sorted([author.name for author, _ in generator.authors])
authors_expected = sorted(['Alexis Métaireau', 'Blogger',
'Author, First', 'Author, Second',
'First Author', 'Second Author'])
self.assertEqual(authors, authors_expected)
categories = sorted([category.name
for category, _ in generator.categories])
categories_expected = [
sorted(['Default', 'TestCategory', 'yeah', 'test', '指導書']),
sorted(['Default', 'TestCategory', 'Yeah', 'test', '指導書'])]
self.assertIn(categories, categories_expected)
tags = sorted([tag.name for tag in generator.tags])
tags_expected = sorted(['bar', 'foo', 'foobar', 'general', 'untagged',
'パイソン', 'マック'])
self.assertEqual(tags, tags_expected)
def test_article_order_by(self):
settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
        settings['CACHE_CONTENT'] = False  # cache not needed for these logic tests
settings['ARTICLE_ORDER_BY'] = 'title'
generator = ArticlesGenerator(
context=settings.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
expected = [
'An Article With Code Block To Test Typogrify Ignore',
'Article title',
'Article with Nonconformant HTML meta tags',
'Article with markdown and summary metadata multi',
'Article with markdown and summary metadata single',
'Article with markdown containing footnotes',
'Article with template',
'Rst with filename metadata',
'Test Markdown extensions',
'Test markdown File',
'Test md File',
'Test mdown File',
'Test mkd File',
'This is a super article !',
'This is a super article !',
'This is a super article !',
'This is a super article !',
'This is a super article !',
'This is a super article !',
'This is an article with category !',
'This is an article with multiple authors in lastname, firstname format!',
'This is an article with multiple authors in list format!',
'This is an article with multiple authors!',
'This is an article with multiple authors!',
'This is an article without category !',
'This is an article without category !',
'マックOS X 10.8でパイソンとVirtualenvをインストールと設定']
articles = [article.title for article in generator.articles]
self.assertEqual(articles, expected)
# reversed title
settings = get_settings(filenames={})
settings['DEFAULT_CATEGORY'] = 'Default'
settings['DEFAULT_DATE'] = (1970, 1, 1)
        settings['CACHE_CONTENT'] = False  # cache not needed for these logic tests
settings['ARTICLE_ORDER_BY'] = 'reversed-title'
generator = ArticlesGenerator(
context=settings.copy(), settings=settings,
path=CONTENT_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
articles = [article.title for article in generator.articles]
self.assertEqual(articles, list(reversed(expected)))
class TestPageGenerator(unittest.TestCase):
    # Note: every time you want to test a new field, make sure the test
    # pages in "TestPages" all define that field, add it to the list built
    # in distill_pages, and then update the assertEqual in
    # test_generate_context to match the expected values.
def setUp(self):
self.temp_cache = mkdtemp(prefix='pelican_cache.')
def tearDown(self):
rmtree(self.temp_cache)
def distill_pages(self, pages):
return [[page.title, page.status, page.template] for page in pages]
def test_generate_context(self):
settings = get_settings(filenames={})
settings['CACHE_PATH'] = self.temp_cache
settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
settings['DEFAULT_DATE'] = (1970, 1, 1)
generator = PagesGenerator(
context=settings.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
pages = self.distill_pages(generator.pages)
hidden_pages = self.distill_pages(generator.hidden_pages)
pages_expected = [
['This is a test page', 'published', 'page'],
['This is a markdown test page', 'published', 'page'],
['This is a test page with a preset template', 'published',
'custom'],
['Page with a bunch of links', 'published', 'page'],
['A Page (Test) for sorting', 'published', 'page'],
]
hidden_pages_expected = [
['This is a test hidden page', 'hidden', 'page'],
['This is a markdown test hidden page', 'hidden', 'page'],
['This is a test hidden page with a custom template', 'hidden',
'custom']
]
self.assertEqual(sorted(pages_expected), sorted(pages))
self.assertEqual(
sorted(pages_expected),
sorted(self.distill_pages(generator.context['pages'])))
self.assertEqual(sorted(hidden_pages_expected), sorted(hidden_pages))
self.assertEqual(
sorted(hidden_pages_expected),
sorted(self.distill_pages(generator.context['hidden_pages'])))
def test_generate_sorted(self):
settings = get_settings(filenames={})
settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
settings['CACHE_PATH'] = self.temp_cache
settings['DEFAULT_DATE'] = (1970, 1, 1)
# default sort (filename)
pages_expected_sorted_by_filename = [
['This is a test page', 'published', 'page'],
['This is a markdown test page', 'published', 'page'],
['A Page (Test) for sorting', 'published', 'page'],
['Page with a bunch of links', 'published', 'page'],
['This is a test page with a preset template', 'published',
'custom'],
]
generator = PagesGenerator(
context=settings.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
pages = self.distill_pages(generator.pages)
self.assertEqual(pages_expected_sorted_by_filename, pages)
# sort by title
pages_expected_sorted_by_title = [
['A Page (Test) for sorting', 'published', 'page'],
['Page with a bunch of links', 'published', 'page'],
['This is a markdown test page', 'published', 'page'],
['This is a test page', 'published', 'page'],
['This is a test page with a preset template', 'published',
'custom'],
]
settings['PAGE_ORDER_BY'] = 'title'
generator = PagesGenerator(
context=settings.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
pages = self.distill_pages(generator.pages)
self.assertEqual(pages_expected_sorted_by_title, pages)
# sort by title reversed
pages_expected_sorted_by_title = [
['This is a test page with a preset template', 'published',
'custom'],
['This is a test page', 'published', 'page'],
['This is a markdown test page', 'published', 'page'],
['Page with a bunch of links', 'published', 'page'],
['A Page (Test) for sorting', 'published', 'page'],
]
settings['PAGE_ORDER_BY'] = 'reversed-title'
generator = PagesGenerator(
context=settings.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
pages = self.distill_pages(generator.pages)
self.assertEqual(pages_expected_sorted_by_title, pages)
def test_tag_and_category_links_on_generated_pages(self):
"""
Test to ensure links of the form {tag}tagname and {category}catname
are generated correctly on pages
"""
settings = get_settings(filenames={})
settings['PAGE_PATHS'] = ['TestPages'] # relative to CUR_DIR
settings['CACHE_PATH'] = self.temp_cache
settings['DEFAULT_DATE'] = (1970, 1, 1)
generator = PagesGenerator(
context=settings.copy(), settings=settings,
path=CUR_DIR, theme=settings['THEME'], output_path=None)
generator.generate_context()
pages_by_title = {p.title: p.content for p in generator.pages}
test_content = pages_by_title['Page with a bunch of links']
self.assertIn('<a href="/category/yeah.html">', test_content)
self.assertIn('<a href="/tag/matsuku.html">', test_content)
class TestTemplatePagesGenerator(unittest.TestCase):
TEMPLATE_CONTENT = "foo: {{ foo }}"
def setUp(self):
self.temp_content = mkdtemp(prefix='pelicantests.')
self.temp_output = mkdtemp(prefix='pelicantests.')
self.old_locale = locale.setlocale(locale.LC_ALL)
locale.setlocale(locale.LC_ALL, str('C'))
def tearDown(self):
rmtree(self.temp_content)
rmtree(self.temp_output)
locale.setlocale(locale.LC_ALL, self.old_locale)
def test_generate_output(self):
settings = get_settings()
settings['STATIC_PATHS'] = ['static']
settings['TEMPLATE_PAGES'] = {
'template/source.html': 'generated/file.html'
}
generator = TemplatePagesGenerator(
context={'foo': 'bar'}, settings=settings,
path=self.temp_content, theme='', output_path=self.temp_output)
# create a dummy template file
template_dir = os.path.join(self.temp_content, 'template')
template_path = os.path.join(template_dir, 'source.html')
os.makedirs(template_dir)
with open(template_path, 'w') as template_file:
template_file.write(self.TEMPLATE_CONTENT)
writer = Writer(self.temp_output, settings=settings)
generator.generate_output(writer)
output_path = os.path.join(self.temp_output, 'generated', 'file.html')
# output file has been generated
self.assertTrue(os.path.exists(output_path))
# output content is correct
with open(output_path, 'r') as output_file:
self.assertEqual(output_file.read(), 'foo: bar')
class TestStaticGenerator(unittest.TestCase):
def setUp(self):
self.content_path = os.path.join(CUR_DIR, 'mixed_content')
def test_static_excludes(self):
"""Test that StaticGenerator respects STATIC_EXCLUDES.
"""
settings = get_settings(STATIC_EXCLUDES=['subdir'],
PATH=self.content_path, STATIC_PATHS=[''])
context = settings.copy()
context['filenames'] = {}
StaticGenerator(context=context, settings=settings,
path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context()
staticnames = [os.path.basename(c.source_path)
for c in context['staticfiles']]
self.assertNotIn('subdir_fake_image.jpg', staticnames,
"StaticGenerator processed a file in a STATIC_EXCLUDES directory")
self.assertIn('fake_image.jpg', staticnames,
"StaticGenerator skipped a file that it should have included")
def test_static_exclude_sources(self):
"""Test that StaticGenerator respects STATIC_EXCLUDE_SOURCES.
"""
# Test STATIC_EXCLUDE_SOURCES=True
settings = get_settings(STATIC_EXCLUDE_SOURCES=True,
PATH=self.content_path, PAGE_PATHS=[''], STATIC_PATHS=[''],
CACHE_CONTENT=False)
context = settings.copy()
context['filenames'] = {}
for generator_class in (PagesGenerator, StaticGenerator):
generator_class(context=context, settings=settings,
path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context()
staticnames = [os.path.basename(c.source_path)
for c in context['staticfiles']]
self.assertFalse(any(name.endswith(".md") for name in staticnames),
"STATIC_EXCLUDE_SOURCES=True failed to exclude a markdown file")
# Test STATIC_EXCLUDE_SOURCES=False
settings.update(STATIC_EXCLUDE_SOURCES=False)
context = settings.copy()
context['filenames'] = {}
for generator_class in (PagesGenerator, StaticGenerator):
generator_class(context=context, settings=settings,
path=settings['PATH'], output_path=None,
theme=settings['THEME']).generate_context()
staticnames = [os.path.basename(c.source_path)
for c in context['staticfiles']]
self.assertTrue(any(name.endswith(".md") for name in staticnames),
"STATIC_EXCLUDE_SOURCES=False failed to include a markdown file")
| agpl-3.0 |
flgiordano/netcash | +/google-cloud-sdk/lib/third_party/ruamel/yaml/dumper.py | 1 | 4101 | from __future__ import absolute_import
__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper']
from .emitter import *
from .serializer import *
from .representer import *
from .resolver import *
class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version, tags=tags)
SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class Dumper(Emitter, Serializer, Representer, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start,
explicit_end=explicit_end,
version=version, tags=tags)
RoundTripRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
| bsd-3-clause |
mxOBS/deb-pkg_trusty_chromium-browser | tools/perf/measurements/smooth_gesture_util_unittest.py | 9 | 6377 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
import unittest
from measurements import smooth_gesture_util as sg_util
from telemetry.core.platform import tracing_category_filter
from telemetry.core.platform import tracing_options
from telemetry.page import page as page_module
from telemetry.page import page_test
from telemetry.timeline import async_slice
from telemetry.timeline import model as model_module
from telemetry.unittest_util import page_test_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class SmoothGestureUtilTest(unittest.TestCase):
def testGetAdjustedInteractionIfContainGesture(self):
model = model_module.TimelineModel()
renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
renderer_main.name = 'CrRendererMain'
# [ X ] [ Y ]
# [ sub_async_slice_X ]
# [ record_1]
# [ record_6]
# [ record_2 ] [ record_3 ]
# [ record_4 ]
# [ record_5 ]
#
    # Note: X and Y are async slices named
    # SyntheticGestureController::running
async_slice_X = async_slice.AsyncSlice(
'X', 'SyntheticGestureController::running', 10, duration=20,
start_thread=renderer_main, end_thread=renderer_main)
sub_async_slice_X = async_slice.AsyncSlice(
'X', 'SyntheticGestureController::running', 10, duration=20,
start_thread=renderer_main, end_thread=renderer_main)
sub_async_slice_X.parent_slice = async_slice_X
async_slice_X.AddSubSlice(sub_async_slice_X)
async_slice_Y = async_slice.AsyncSlice(
'X', 'SyntheticGestureController::running', 60, duration=20,
start_thread=renderer_main, end_thread=renderer_main)
renderer_main.AddAsyncSlice(async_slice_X)
renderer_main.AddAsyncSlice(async_slice_Y)
model.FinalizeImport(shift_world_to_zero=False)
record_1 = tir_module.TimelineInteractionRecord('Gesture_included', 15, 25)
record_2 = tir_module.TimelineInteractionRecord(
'Gesture_overlapped_left', 5, 25)
record_3 = tir_module.TimelineInteractionRecord(
'Gesture_overlapped_right', 25, 35)
record_4 = tir_module.TimelineInteractionRecord(
'Gesture_containing', 5, 35)
record_5 = tir_module.TimelineInteractionRecord(
'Gesture_non_overlapped', 35, 45)
record_6 = tir_module.TimelineInteractionRecord('Action_included', 15, 25)
adjusted_record_1 = sg_util.GetAdjustedInteractionIfContainGesture(
model, record_1)
self.assertEquals(adjusted_record_1.start, 10)
self.assertEquals(adjusted_record_1.end, 30)
self.assertTrue(adjusted_record_1 is not record_1)
adjusted_record_2 = sg_util.GetAdjustedInteractionIfContainGesture(
model, record_2)
self.assertEquals(adjusted_record_2.start, 10)
self.assertEquals(adjusted_record_2.end, 30)
adjusted_record_3 = sg_util.GetAdjustedInteractionIfContainGesture(
model, record_3)
self.assertEquals(adjusted_record_3.start, 10)
self.assertEquals(adjusted_record_3.end, 30)
adjusted_record_4 = sg_util.GetAdjustedInteractionIfContainGesture(
model, record_4)
self.assertEquals(adjusted_record_4.start, 10)
self.assertEquals(adjusted_record_4.end, 30)
adjusted_record_5 = sg_util.GetAdjustedInteractionIfContainGesture(
model, record_5)
self.assertEquals(adjusted_record_5.start, 35)
self.assertEquals(adjusted_record_5.end, 45)
self.assertTrue(adjusted_record_5 is not record_5)
adjusted_record_6 = sg_util.GetAdjustedInteractionIfContainGesture(
model, record_6)
self.assertEquals(adjusted_record_6.start, 15)
self.assertEquals(adjusted_record_6.end, 25)
self.assertTrue(adjusted_record_6 is not record_6)
class ScrollingPage(page_module.Page):
def __init__(self, url, page_set, base_dir):
super(ScrollingPage, self).__init__(url, page_set, base_dir)
def RunPageInteractions(self, action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
    # Add a 0.5 s gap between when the gesture records are issued and when we
    # actually scroll the page.
time.sleep(0.5)
action_runner.ScrollPage()
time.sleep(0.5)
interaction.End()
class SmoothGestureTest(page_test_test_case.PageTestTestCase):
def testSmoothGestureAdjusted(self):
ps = self.CreateEmptyPageSet()
ps.AddUserStory(ScrollingPage(
'file://scrollable_page.html', ps, base_dir=ps.base_dir))
models = []
tab_ids = []
class ScrollingGestureTestMeasurement(page_test.PageTest):
def __init__(self):
# pylint: disable=bad-super-call
super(ScrollingGestureTestMeasurement, self).__init__(
'RunPageInteractions', False)
def WillRunActions(self, _page, tab):
options = tracing_options.TracingOptions()
options.enable_chrome_trace = True
tab.browser.platform.tracing_controller.Start(
options, tracing_category_filter.TracingCategoryFilter())
def DidRunActions(self, _page, tab):
models.append(model_module.TimelineModel(
tab.browser.platform.tracing_controller.Stop()))
tab_ids.append(tab.id)
def ValidateAndMeasurePage(self, _page, _tab, _results):
pass
self.RunMeasurement(ScrollingGestureTestMeasurement(), ps)
timeline_model = models[0]
renderer_thread = timeline_model.GetRendererThreadFromTabId(
tab_ids[0])
smooth_record = None
for e in renderer_thread.async_slices:
if tir_module.IsTimelineInteractionRecord(e.name):
smooth_record = tir_module.TimelineInteractionRecord.FromAsyncEvent(e)
self.assertIsNotNone(smooth_record)
adjusted_smooth_gesture = (
sg_util.GetAdjustedInteractionIfContainGesture(
timeline_model, smooth_record))
    # Test that the scroll gesture starts at least 500 ms after the start of
    # the interaction record and ends at least 500 ms before the end of the
    # interaction record.
self.assertLessEqual(
500, adjusted_smooth_gesture.start - smooth_record.start)
self.assertLessEqual(
500, smooth_record.end - adjusted_smooth_gesture.end)
| bsd-3-clause |
neo1218/guisheng2 | tests/test_client.py | 2 | 1467 | # coding: utf-8
"""
test_client.py
~~~~~~~~~~~~~~
    Use the test client to simulate client requests and form submissions
"""
import unittest
from flask import url_for
from app import create_app, db
from app.models import Role, User
class FlaskClientTestCase(unittest.TestCase):
"""flask 测试客户端"""
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
Role.insert_roles()
# create test account
u = User(
email = '[email protected]',
password = 'test',
username = 'test'
)
db.session.add(u)
db.session.commit()
self.client = self.app.test_client(use_cookies=True)
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def test_homepage(self):
"""test home_page"""
response = self.client.get(url_for('main.index'))
self.assertTrue('登录' in response.get_data(as_text=True))
self.assertFalse('新闻' in response.get_data(as_text=True))
def test_login(self):
"""test login_page"""
response = self.client.post(url_for('auth.login'), data={
'email':'[email protected]',
'password':'test'
}, follow_redirects = True)
data = response.get_data(as_text=True)
self.assertTrue('原创' in data)
| mit |
ralphbean/ansible | v2/ansible/plugins/action/script.py | 10 | 4218 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
TRANSFERS_FILES = True
def run(self, tmp=None, task_vars=None):
''' handler for file transfer operations '''
# FIXME: noop stuff still needs to be sorted out
#if self.runner.noop_on_check(inject):
# # in check mode, always skip this module
# return ReturnData(conn=conn, comm_ok=True,
# result=dict(skipped=True, msg='check mode not supported for this module'))
if not tmp:
tmp = self._make_tmp_path()
creates = self._task.args.get('creates')
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
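            # Illustrative (hypothetical) playbook usage:
            #   - script: /usr/local/bin/setup.sh creates=/etc/myapp/.installed
            # The stat call below tells us whether that file already exists.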
result = self._execute_module(module_name='stat', module_args=dict(path=creates), tmp=tmp, persist_files=True)
stat = result.get('stat', None)
if stat and stat.get('exists', False):
return dict(skipped=True, msg=("skipped, since %s exists" % creates))
removes = self._task.args.get('removes')
if removes:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
result = self._execute_module(module_name='stat', module_args=dict(path=removes), tmp=tmp, persist_files=True)
stat = result.get('stat', None)
if stat and not stat.get('exists', False):
return dict(skipped=True, msg=("skipped, since %s does not exist" % removes))
# the script name is the first item in the raw params, so we split it
# out now so we know the file name we need to transfer to the remote,
# and everything else is an argument to the script which we need later
# to append to the remote command
parts = self._task.args.get('_raw_params', '').strip().split()
source = parts[0]
args = ' '.join(parts[1:])
if self._task._role is not None:
source = self._loader.path_dwim_relative(self._task._role._role_path, 'files', source)
else:
source = self._loader.path_dwim(source)
# transfer the file to a remote tmp location
tmp_src = self._shell.join_path(tmp, os.path.basename(source))
self._connection.put_file(source, tmp_src)
sudoable = True
# set file permissions, more permissive when the copy is done as a different user
if self._connection_info.become and self._connection_info.become_user != 'root':
chmod_mode = 'a+rx'
sudoable = False
else:
chmod_mode = '+rx'
self._remote_chmod(tmp, chmod_mode, tmp_src, sudoable=sudoable)
# add preparation steps to one ssh roundtrip executing the script
env_string = self._compute_environment_string()
script_cmd = ' '.join([env_string, tmp_src, args])
result = self._low_level_execute_command(cmd=script_cmd, tmp=None, sudoable=sudoable)
# clean up after
if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES:
self._remove_tmp_path(tmp)
result['changed'] = True
return result
| gpl-3.0 |
andras-tim/sphinxcontrib-httpdomain | sphinxcontrib/autohttp/flask.py | 1 | 5419 | """
sphinxcontrib.autohttp.flask
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The sphinx.ext.autodoc-style HTTP API reference builder (from Flask)
for sphinxcontrib.httpdomain.
:copyright: Copyright 2011 by Hong Minhee
:license: BSD, see LICENSE for details.
"""
import re
import six
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.statemachine import ViewList
from sphinx.util import force_decode
from sphinx.util.compat import Directive
from sphinx.util.nodes import nested_parse_with_titles
from sphinx.util.docstrings import prepare_docstring
from sphinx.pycode import ModuleAnalyzer
from sphinxcontrib import httpdomain
from sphinxcontrib.autohttp.common import http_directive, import_object
def translate_werkzeug_rule(rule):
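    # Convert a werkzeug rule such as '/users/<int:user_id>' into the
    # httpdomain-style path '/users/(int:user_id)'; variables that use the
    # default converter keep only their name, e.g. '/pages/<name>' -> '/pages/(name)'.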
from werkzeug.routing import parse_rule
buf = six.StringIO()
for conv, arg, var in parse_rule(rule):
if conv:
buf.write('(')
if conv != 'default':
buf.write(conv)
buf.write(':')
buf.write(var)
buf.write(')')
else:
buf.write(var)
return buf.getvalue()
def get_routes(app):
for rule in app.url_map.iter_rules():
path = translate_werkzeug_rule(rule.rule)
methods = rule.methods.difference(['OPTIONS', 'HEAD'])
for method in methods:
yield method, path, rule.endpoint
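# Routes are sorted by endpoint name, then by path length, then by HTTP method
# in the fixed order GET, POST, PUT, DELETE (any other method sorts last).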
def get_route_order_key(item):
FIXED_ORDER = ('GET', 'POST', 'PUT', 'DELETE')
method, path, endpoint = item
method_index = len(FIXED_ORDER)
if method in FIXED_ORDER:
method_index = FIXED_ORDER.index(method)
return "%s__%03d__%02d" % (endpoint, len(path), method_index)
def get_routes_in_order(app):
ordered_routes = sorted(get_routes(app), key=get_route_order_key)
return ordered_routes
class AutoflaskDirective(Directive):
has_content = True
required_arguments = 1
option_spec = {'endpoints': directives.unchanged,
'blueprints': directives.unchanged,
'undoc-endpoints': directives.unchanged,
'undoc-blueprints': directives.unchanged,
'undoc-static': directives.unchanged,
'include-empty-docstring': directives.unchanged}
@property
def endpoints(self):
endpoints = self.options.get('endpoints', None)
if not endpoints:
return None
return frozenset(re.split(r'\s*,\s*', endpoints))
@property
def undoc_endpoints(self):
undoc_endpoints = self.options.get('undoc-endpoints', None)
if not undoc_endpoints:
return frozenset()
return frozenset(re.split(r'\s*,\s*', undoc_endpoints))
@property
def blueprints(self):
blueprints = self.options.get('blueprints', None)
if not blueprints:
return None
return frozenset(re.split(r'\s*,\s*', blueprints))
@property
def undoc_blueprints(self):
undoc_blueprints = self.options.get('undoc-blueprints', None)
if not undoc_blueprints:
return frozenset()
return frozenset(re.split(r'\s*,\s*', undoc_blueprints))
def make_rst(self):
app = import_object(self.arguments[0])
for method, path, endpoint in get_routes_in_order(app):
try:
blueprint, _, endpoint_internal = endpoint.rpartition('.')
if self.blueprints and blueprint not in self.blueprints:
continue
if blueprint in self.undoc_blueprints:
continue
except ValueError:
pass # endpoint is not within a blueprint
if self.endpoints and endpoint not in self.endpoints:
continue
if endpoint in self.undoc_endpoints:
continue
try:
static_url_path = app.static_url_path # Flask 0.7 or higher
except AttributeError:
static_url_path = app.static_path # Flask 0.6 or under
if ('undoc-static' in self.options and endpoint == 'static' and
path == static_url_path + '/(path:filename)'):
continue
view = app.view_functions[endpoint]
docstring = view.__doc__ or ''
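            # Flask MethodView-style endpoints keep their documentation on the
            # per-method handlers (get(), post(), ...), so prefer that
            # docstring when the view exposes a view_class.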
if hasattr(view, 'view_class'):
meth_func = getattr(view.view_class, method.lower(), None)
if meth_func and meth_func.__doc__:
docstring = meth_func.__doc__
if not isinstance(docstring, six.text_type):
analyzer = ModuleAnalyzer.for_module(view.__module__)
docstring = force_decode(docstring, analyzer.encoding)
if not docstring and 'include-empty-docstring' not in self.options:
continue
docstring = prepare_docstring(docstring)
for line in http_directive(method, path, docstring):
yield line
def run(self):
node = nodes.section()
node.document = self.state.document
result = ViewList()
for line in self.make_rst():
result.append(line, '<autoflask>')
nested_parse_with_titles(self.state, result, node)
return node.children
def setup(app):
if 'http' not in app.domains:
httpdomain.setup(app)
app.add_directive('autoflask', AutoflaskDirective)
| bsd-2-clause |
jjmleiro/hue | apps/sqoop/src/sqoop/client/resource.py | 30 | 3647 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from django.utils.translation import ugettext as _
from desktop.lib.python_util import force_dict_to_strings
from desktop.lib.rest.resource import Resource
class SqoopResource(Resource):
"""
Sqoop resources provide extra response headers.
@see desktop.lib.rest.resource
"""
def invoke(self, method, relpath=None, params=None, data=None, headers=None, allow_redirects=False):
"""
Invoke an API method.
Look for sqoop-error-code and sqoop-error-message.
@return: Raw body or JSON dictionary (if response content type is JSON).
"""
path = self._join_uri(relpath)
resp = self._client.execute(method,
path,
params=params,
data=data,
headers=headers,
allow_redirects=allow_redirects)
if resp.status_code == 200:
self._client.logger.debug(
"%(method)s Got response:\n%(headers)s\n%(body)s" % {
'method': method,
'headers': resp.headers,
'body': resp.content
})
# Sqoop always uses json
return self._format_response(resp)
else:
# Body will probably be a JSON formatted stacktrace
body = self._format_response(resp)
msg_format = "%(method)s Sqoop Error (%s): %s\n\t%s"
args = (resp.headers['sqoop-error-code'], resp.headers['sqoop-error-message'], body)
self._client.logger.error(msg_format % args)
raise IOError(_(msg_format) % args)
def delete(self, relpath=None, params=None, headers=None):
"""
Invoke the DELETE method on a resource.
@param relpath: Optional. A relative path to this resource's path.
@param params: Key-value data.
@return: A dictionary of the JSON result.
"""
return self.invoke("DELETE", relpath, params, None, headers)
def post(self, relpath=None, params=None, data=None, headers=None):
"""
Invoke the POST method on a resource.
@param relpath: Optional. A relative path to this resource's path.
@param params: Key-value data.
@param data: Optional. Body of the request.
    @param headers: Optional.
@return: A dictionary of the JSON result.
"""
return self.invoke("POST", relpath, params, data, headers)
def put(self, relpath=None, params=None, data=None, headers=None):
"""
Invoke the PUT method on a resource.
@param relpath: Optional. A relative path to this resource's path.
@param params: Key-value data.
@param data: Optional. Body of the request.
    @param headers: Optional.
@return: A dictionary of the JSON result.
"""
return self.invoke("PUT", relpath, params, data, headers)
def _make_headers(self, contenttype=None):
if contenttype:
return { 'Content-Type': contenttype }
return None
| apache-2.0 |
tinkhaven-organization/odoo | addons/account/wizard/account_chart.py | 271 | 5191 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_chart(osv.osv_memory):
"""
For Chart of Accounts
"""
_name = "account.chart"
_description = "Account chart"
_columns = {
'fiscalyear': fields.many2one('account.fiscalyear', \
'Fiscal year', \
help='Keep empty for all open fiscal years'),
'period_from': fields.many2one('account.period', 'Start period'),
'period_to': fields.many2one('account.period', 'End period'),
'target_move': fields.selection([('posted', 'All Posted Entries'),
('all', 'All Entries'),
], 'Target Moves', required=True),
}
def _get_fiscalyear(self, cr, uid, context=None):
"""Return default Fiscalyear value"""
return self.pool.get('account.fiscalyear').find(cr, uid, context=context)
def onchange_fiscalyear(self, cr, uid, ids, fiscalyear_id=False, context=None):
res = {}
if fiscalyear_id:
start_period = end_period = False
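            # The first sub-select picks the fiscal year's earliest period
            # (opening/special periods first); the second picks the latest
            # period that has already started. Together they give sensible
            # default start/end periods for the selected fiscal year.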
cr.execute('''
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
ORDER BY p.date_start ASC, p.special DESC
LIMIT 1) AS period_start
UNION ALL
SELECT * FROM (SELECT p.id
FROM account_period p
LEFT JOIN account_fiscalyear f ON (p.fiscalyear_id = f.id)
WHERE f.id = %s
AND p.date_start < NOW()
ORDER BY p.date_stop DESC
LIMIT 1) AS period_stop''', (fiscalyear_id, fiscalyear_id))
periods = [i[0] for i in cr.fetchall()]
if periods:
start_period = periods[0]
if len(periods) > 1:
end_period = periods[1]
res['value'] = {'period_from': start_period, 'period_to': end_period}
else:
res['value'] = {'period_from': False, 'period_to': False}
return res
def account_chart_open_window(self, cr, uid, ids, context=None):
"""
Opens chart of Accounts
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of account chart’s IDs
@return: dictionary of Open account chart window on given fiscalyear and all Entries or posted entries
"""
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
period_obj = self.pool.get('account.period')
fy_obj = self.pool.get('account.fiscalyear')
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_account_tree')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
fiscalyear_id = data.get('fiscalyear', False) and data['fiscalyear'][0] or False
result['periods'] = []
if data['period_from'] and data['period_to']:
period_from = data.get('period_from', False) and data['period_from'][0] or False
period_to = data.get('period_to', False) and data['period_to'][0] or False
result['periods'] = period_obj.build_ctx_periods(cr, uid, period_from, period_to)
result['context'] = str({'fiscalyear': fiscalyear_id, 'periods': result['periods'], \
'state': data['target_move']})
if fiscalyear_id:
result['name'] += ':' + fy_obj.read(cr, uid, [fiscalyear_id], context=context)[0]['code']
return result
_defaults = {
'target_move': 'posted',
'fiscalyear': _get_fiscalyear,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cyrixhero/powerline | tests/vim.py | 15 | 18840 | # vim:fileencoding=utf-8:noet
_log = []
vars = {}
vvars = {'version': 703}
_tabpage = 0
_mode = 'n'
_buf_purge_events = set()
options = {
'paste': 0,
'ambiwidth': 'single',
'columns': 80,
'encoding': 'utf-8',
}
_last_bufnr = 0
_highlights = {}
from collections import defaultdict as _defaultdict
_environ = _defaultdict(lambda: '')
del _defaultdict
_thread_id = None
def _set_thread_id():
global _thread_id
from threading import current_thread
_thread_id = current_thread().ident
# Assuming import is done from the main thread
_set_thread_id()
def _print_log():
for item in _log:
print (item)
_log[:] = ()
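# Decorator for the emulated vim API: every call is recorded in _log, and a
# RuntimeError is raised if the call comes from a thread other than the one
# that imported this module, mimicking Vim's single-threaded Python interface.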
def _vim(func):
from functools import wraps
from threading import current_thread
@wraps(func)
def f(*args, **kwargs):
global _thread_id
if _thread_id != current_thread().ident:
raise RuntimeError('Accessing vim from separate threads is not allowed')
_log.append((func.__name__, args))
return func(*args, **kwargs)
return f
def _unicode(func):
from functools import wraps
import sys
if sys.version_info < (3,):
return func
@wraps(func)
def f(*args, **kwargs):
from powerline.lib.unicode import u
ret = func(*args, **kwargs)
if isinstance(ret, bytes):
ret = u(ret)
return ret
return f
class _Buffers(object):
@_vim
def __init__(self):
self.d = {}
@_vim
def __len__(self):
return len(self.d)
@_vim
def __getitem__(self, item):
return self.d[item]
@_vim
def __setitem__(self, item, value):
self.d[item] = value
@_vim
def __iter__(self):
return iter(self.d.values())
@_vim
def __contains__(self, item):
return item in self.d
@_vim
def _keys(self):
return self.d.keys()
@_vim
def _pop(self, *args, **kwargs):
return self.d.pop(*args, **kwargs)
buffers = _Buffers()
class _ObjList(object):
@_vim
def __init__(self, objtype):
self.l = []
self.objtype = objtype
@_vim
def __getitem__(self, item):
return self.l[item - int(item > 0)]
@_vim
def __len__(self):
return len(self.l)
@_vim
def __iter__(self):
return iter(self.l)
@_vim
def _pop(self, idx):
obj = self.l.pop(idx - 1)
for moved_obj in self.l[idx - 1:]:
moved_obj.number -= 1
return obj
@_vim
def _append(self, *args, **kwargs):
return self.l.append(*args, **kwargs)
@_vim
def _new(self, *args, **kwargs):
number = len(self) + 1
new_obj = self.objtype(number, *args, **kwargs)
self._append(new_obj)
return new_obj
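# Recursively encode str values to UTF-8 bytes under Python 3 (values are left
# untouched under Python 2), so results from the emulated vim functions look
# like the byte strings the real bindings hand back.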
def _construct_result(r):
import sys
if sys.version_info < (3,):
return r
else:
if isinstance(r, str):
return r.encode('utf-8')
elif isinstance(r, list):
return [_construct_result(i) for i in r]
elif isinstance(r, dict):
return dict((
(_construct_result(k), _construct_result(v))
for k, v in r.items()
))
return r
def _str_func(func):
from functools import wraps
@wraps(func)
def f(*args, **kwargs):
return _construct_result(func(*args, **kwargs))
return f
def _log_print():
import sys
for entry in _log:
sys.stdout.write(repr(entry) + '\n')
_current_group = None
_on_wipeout = []
@_vim
def command(cmd):
global _current_group
cmd = cmd.lstrip()
if cmd.startswith('let g:'):
import re
varname, value = re.compile(r'^let g:(\w+)\s*=\s*(.*)').match(cmd).groups()
vars[varname] = value
elif cmd.startswith('hi '):
sp = cmd.split()
_highlights[sp[1]] = sp[2:]
elif cmd.startswith('augroup'):
augroup = cmd.partition(' ')[2]
if augroup.upper() == 'END':
_current_group = None
else:
_current_group = augroup
elif cmd.startswith('autocmd'):
rest = cmd.partition(' ')[2]
auevent, rest = rest.partition(' ')[::2]
pattern, aucmd = rest.partition(' ')[::2]
if auevent != 'BufWipeout' or pattern != '*':
raise NotImplementedError
import sys
if sys.version_info < (3,):
if not aucmd.startswith(':python '):
raise NotImplementedError
else:
if not aucmd.startswith(':python3 '):
raise NotImplementedError
_on_wipeout.append(aucmd.partition(' ')[2])
elif cmd.startswith('set '):
if cmd.startswith('set statusline='):
options['statusline'] = cmd[len('set statusline='):]
elif cmd.startswith('set tabline='):
options['tabline'] = cmd[len('set tabline='):]
else:
raise NotImplementedError(cmd)
else:
raise NotImplementedError(cmd)
@_vim
@_unicode
def eval(expr):
if expr.startswith('g:'):
return vars[expr[2:]]
elif expr.startswith('v:'):
return vvars[expr[2:]]
elif expr.startswith('&'):
return options[expr[1:]]
elif expr.startswith('$'):
return _environ[expr[1:]]
elif expr.startswith('PowerlineRegisterCachePurgerEvent'):
_buf_purge_events.add(expr[expr.find('"') + 1:expr.rfind('"') - 1])
return '0'
elif expr.startswith('exists('):
return '0'
elif expr.startswith('getwinvar('):
import re
match = re.match(r'^getwinvar\((\d+), "(\w+)"\)$', expr)
if not match:
raise NotImplementedError(expr)
winnr = int(match.group(1))
varname = match.group(2)
return _emul_getwinvar(winnr, varname)
elif expr.startswith('has_key('):
import re
match = re.match(r'^has_key\(getwinvar\((\d+), ""\), "(\w+)"\)$', expr)
if match:
winnr = int(match.group(1))
varname = match.group(2)
return 0 + (varname in current.tabpage.windows[winnr].vars)
else:
match = re.match(r'^has_key\(gettabwinvar\((\d+), (\d+), ""\), "(\w+)"\)$', expr)
if not match:
raise NotImplementedError(expr)
tabnr = int(match.group(1))
winnr = int(match.group(2))
varname = match.group(3)
return 0 + (varname in tabpages[tabnr].windows[winnr].vars)
elif expr == 'getbufvar("%", "NERDTreeRoot").path.str()':
import os
assert os.path.basename(current.buffer.name).startswith('NERD_tree_')
return '/usr/include'
elif expr == 'tabpagenr()':
return current.tabpage.number
elif expr == 'tabpagenr("$")':
return len(tabpages)
elif expr.startswith('tabpagewinnr('):
tabnr = int(expr[len('tabpagewinnr('):-1])
return tabpages[tabnr].window.number
elif expr.startswith('tabpagebuflist('):
import re
match = re.match(r'tabpagebuflist\((\d+)\)\[(\d+)\]', expr)
tabnr = int(match.group(1))
winnr = int(match.group(2)) + 1
return tabpages[tabnr].windows[winnr].buffer.number
elif expr.startswith('gettabwinvar('):
import re
match = re.match(r'gettabwinvar\((\d+), (\d+), "(\w+)"\)', expr)
tabnr = int(match.group(1))
winnr = int(match.group(2))
varname = match.group(3)
return tabpages[tabnr].windows[winnr].vars[varname]
elif expr.startswith('type(function('):
import re
match = re.match(r'^type\(function\("([^"]+)"\)\) == 2$', expr)
if not match:
raise NotImplementedError(expr)
return 0
raise NotImplementedError(expr)
@_vim
def bindeval(expr):
if expr == 'g:':
return vars
elif expr == '{}':
return {}
elif expr == '[]':
return []
import re
match = re.compile(r'^function\("([^"\\]+)"\)$').match(expr)
if match:
return globals()['_emul_' + match.group(1)]
else:
raise NotImplementedError
@_vim
@_str_func
def _emul_mode(*args):
if args and args[0]:
return _mode
else:
return _mode[0]
@_vim
@_str_func
def _emul_getbufvar(bufnr, varname):
import re
if varname[0] == '&':
if bufnr == '%':
bufnr = current.buffer.number
if bufnr not in buffers:
return ''
try:
return buffers[bufnr].options[varname[1:]]
except KeyError:
try:
return options[varname[1:]]
except KeyError:
return ''
elif re.match('^[a-zA-Z_]+$', varname):
if bufnr == '%':
bufnr = current.buffer.number
if bufnr not in buffers:
return ''
return buffers[bufnr].vars[varname]
raise NotImplementedError
@_vim
@_str_func
def _emul_getwinvar(winnr, varname):
return current.tabpage.windows[winnr].vars.get(varname, '')
@_vim
def _emul_setwinvar(winnr, varname, value):
current.tabpage.windows[winnr].vars[varname] = value
@_vim
def _emul_virtcol(expr):
if expr == '.':
return current.window.cursor[1] + 1
if isinstance(expr, list) and len(expr) == 3:
return expr[-2] + expr[-1]
raise NotImplementedError
_v_pos = None
@_vim
def _emul_getpos(expr):
if expr == '.':
return [0, current.window.cursor[0] + 1, current.window.cursor[1] + 1, 0]
if expr == 'v':
return _v_pos or [0, current.window.cursor[0] + 1, current.window.cursor[1] + 1, 0]
raise NotImplementedError
@_vim
@_str_func
def _emul_fnamemodify(path, modstring):
import os
_modifiers = {
'~': lambda path: path.replace(os.environ['HOME'].encode('utf-8'), b'~') if path.startswith(os.environ['HOME'].encode('utf-8')) else path,
'.': lambda path: (lambda tpath: path if tpath[:3] == b'..' + os.sep.encode() else tpath)(os.path.relpath(path)),
't': lambda path: os.path.basename(path),
'h': lambda path: os.path.dirname(path),
}
for mods in modstring.split(':')[1:]:
path = _modifiers[mods](path)
return path
@_vim
@_str_func
def _emul_expand(expr):
global _abuf
if expr == '<abuf>':
return _abuf or current.buffer.number
raise NotImplementedError
@_vim
def _emul_bufnr(expr):
if expr == '$':
return _last_bufnr
raise NotImplementedError
@_vim
def _emul_exists(ident):
if ident.startswith('g:'):
return ident[2:] in vars
elif ident.startswith(':'):
return 0
raise NotImplementedError
@_vim
def _emul_line2byte(line):
buflines = current.buffer._buf_lines
if line == len(buflines) + 1:
return sum((len(s) for s in buflines)) + 1
raise NotImplementedError
@_vim
def _emul_line(expr):
cursorline = current.window.cursor[0] + 1
numlines = len(current.buffer._buf_lines)
if expr == 'w0':
return max(cursorline - 5, 1)
if expr == 'w$':
return min(cursorline + 5, numlines)
raise NotImplementedError
@_vim
@_str_func
def _emul_strtrans(s):
# FIXME Do more replaces
return s.replace(b'\xFF', b'<ff>')
@_vim
@_str_func
def _emul_bufname(bufnr):
try:
return buffers[bufnr]._name or b''
except KeyError:
return b''
_window_id = 0
class _Window(object):
def __init__(self, number, buffer=None, cursor=(1, 0), width=80):
global _window_id
self.cursor = cursor
self.width = width
self.number = number
if buffer:
if type(buffer) is _Buffer:
self.buffer = buffer
else:
self.buffer = _Buffer(**buffer)
else:
self.buffer = _Buffer()
_window_id += 1
self._window_id = _window_id
self.options = {}
self.vars = {
'powerline_window_id': self._window_id,
}
def __repr__(self):
return '<window ' + str(self.number - 1) + '>'
class _Tabpage(object):
def __init__(self, number):
self.windows = _ObjList(_Window)
self.number = number
def _new_window(self, **kwargs):
self.window = self.windows._new(**kwargs)
return self.window
def _close_window(self, winnr, open_window=True):
curwinnr = self.window.number
win = self.windows._pop(winnr)
if self.windows and winnr == curwinnr:
self.window = self.windows[-1]
elif open_window:
current.tabpage._new_window()
return win
def _close(self):
global _tabpage
while self.windows:
self._close_window(1, False)
tabpages._pop(self.number)
_tabpage = len(tabpages)
tabpages = _ObjList(_Tabpage)
_abuf = None
class _Buffer(object):
def __init__(self, name=None):
global _last_bufnr
_last_bufnr += 1
bufnr = _last_bufnr
self.number = bufnr
# FIXME Use unicode() for python-3
self.name = name
self.vars = {'changedtick': 1}
self.options = {
'modified': 0,
'readonly': 0,
'fileformat': 'unix',
'filetype': '',
'buftype': '',
'fileencoding': 'utf-8',
'textwidth': 80,
}
self._buf_lines = ['']
self._undostate = [self._buf_lines[:]]
self._undo_written = len(self._undostate)
buffers[bufnr] = self
@property
def name(self):
import sys
if sys.version_info < (3,):
return self._name
else:
return str(self._name, 'utf-8') if self._name else None
@name.setter
def name(self, name):
if name is None:
self._name = None
else:
import os
if type(name) is not bytes:
name = name.encode('utf-8')
if b':/' in name:
self._name = name
else:
self._name = os.path.abspath(name)
def __getitem__(self, line):
return self._buf_lines[line]
def __setitem__(self, line, value):
self.options['modified'] = 1
self.vars['changedtick'] += 1
self._buf_lines[line] = value
from copy import copy
self._undostate.append(copy(self._buf_lines))
def __setslice__(self, *args):
self.options['modified'] = 1
self.vars['changedtick'] += 1
self._buf_lines.__setslice__(*args)
from copy import copy
self._undostate.append(copy(self._buf_lines))
def __getslice__(self, *args):
return self._buf_lines.__getslice__(*args)
def __len__(self):
return len(self._buf_lines)
def __repr__(self):
return '<buffer ' + str(self.name) + '>'
def __del__(self):
global _abuf
bufnr = self.number
try:
import __main__
except ImportError:
pass
except RuntimeError:
# Module may have already been garbage-collected
pass
else:
if _on_wipeout:
_abuf = bufnr
try:
for event in _on_wipeout:
exec(event, __main__.__dict__)
finally:
_abuf = None
class _Current(object):
@property
def buffer(self):
return self.window.buffer
@property
def window(self):
return self.tabpage.window
@property
def tabpage(self):
return tabpages[_tabpage - 1]
current = _Current()
_dict = None
@_vim
def _init():
global _dict
if _dict:
return _dict
_dict = {}
for varname, value in globals().items():
if varname[0] != '_':
_dict[varname] = value
_tabnew()
return _dict
@_vim
def _get_segment_info():
mode_translations = {
chr(ord('V') - 0x40): '^V',
chr(ord('S') - 0x40): '^S',
}
mode = _mode
mode = mode_translations.get(mode, mode)
window = current.window
buffer = current.buffer
tabpage = current.tabpage
return {
'window': window,
'winnr': window.number,
'buffer': buffer,
'bufnr': buffer.number,
'tabpage': tabpage,
'tabnr': tabpage.number,
'window_id': window._window_id,
'mode': mode,
'encoding': options['encoding'],
}
@_vim
def _launch_event(event):
pass
@_vim
def _start_mode(mode):
global _mode
if mode == 'i':
_launch_event('InsertEnter')
elif _mode == 'i':
_launch_event('InsertLeave')
_mode = mode
@_vim
def _undo():
if len(current.buffer._undostate) == 1:
return
buffer = current.buffer
buffer._undostate.pop(-1)
buffer._buf_lines = buffer._undostate[-1]
if buffer._undo_written == len(buffer._undostate):
buffer.options['modified'] = 0
@_vim
def _edit(name=None):
if current.buffer.name is None:
buffer = current.buffer
buffer.name = name
else:
buffer = _Buffer(name)
current.window.buffer = buffer
@_vim
def _tabnew(name=None):
global windows
global _tabpage
tabpage = tabpages._new()
windows = tabpage.windows
_tabpage = len(tabpages)
_new(name)
return tabpage
@_vim
def _new(name=None):
current.tabpage._new_window(buffer={'name': name})
@_vim
def _split():
current.tabpage._new_window(buffer=current.buffer)
@_vim
def _close(winnr, wipe=True):
win = current.tabpage._close_window(winnr)
if wipe:
for w in current.tabpage.windows:
if w.buffer.number == win.buffer.number:
break
else:
_bw(win.buffer.number)
@_vim
def _bw(bufnr=None):
bufnr = bufnr or current.buffer.number
winnr = 1
for win in current.tabpage.windows:
if win.buffer.number == bufnr:
_close(winnr, wipe=False)
winnr += 1
buffers._pop(bufnr)
if not buffers:
_Buffer()
_b(max(buffers._keys()))
@_vim
def _b(bufnr):
current.window.buffer = buffers[bufnr]
@_vim
def _set_cursor(line, col):
current.window.cursor = (line, col)
if _mode == 'n':
_launch_event('CursorMoved')
elif _mode == 'i':
_launch_event('CursorMovedI')
@_vim
def _get_buffer():
return current.buffer
@_vim
def _set_bufoption(option, value, bufnr=None):
buffers[bufnr or current.buffer.number].options[option] = value
if option == 'filetype':
_launch_event('FileType')
class _WithNewBuffer(object):
def __init__(self, func, *args, **kwargs):
self.call = lambda: func(*args, **kwargs)
def __enter__(self):
self.call()
self.bufnr = current.buffer.number
return _get_segment_info()
def __exit__(self, *args):
_bw(self.bufnr)
@_vim
def _set_dict(d, new, setfunc=None):
if not setfunc:
def setfunc(k, v):
d[k] = v
old = {}
na = []
for k, v in new.items():
try:
old[k] = d[k]
except KeyError:
na.append(k)
setfunc(k, v)
return old, na
class _WithBufOption(object):
def __init__(self, **new):
self.new = new
def __enter__(self):
self.buffer = current.buffer
self.old = _set_dict(self.buffer.options, self.new, _set_bufoption)[0]
def __exit__(self, *args):
self.buffer.options.update(self.old)
class _WithMode(object):
def __init__(self, new):
self.new = new
def __enter__(self):
self.old = _mode
_start_mode(self.new)
return _get_segment_info()
def __exit__(self, *args):
_start_mode(self.old)
class _WithDict(object):
def __init__(self, d, **new):
self.new = new
self.d = d
def __enter__(self):
self.old, self.na = _set_dict(self.d, self.new)
def __exit__(self, *args):
self.d.update(self.old)
for k in self.na:
self.d.pop(k)
class _WithSplit(object):
def __enter__(self):
_split()
def __exit__(self, *args):
_close(2, wipe=False)
class _WithBufName(object):
def __init__(self, new):
self.new = new
def __enter__(self):
import os
buffer = current.buffer
self.buffer = buffer
self.old = buffer.name
buffer.name = self.new
def __exit__(self, *args):
self.buffer.name = self.old
class _WithNewTabPage(object):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def __enter__(self):
self.tab = _tabnew(*self.args, **self.kwargs)
def __exit__(self, *args):
self.tab._close()
class _WithGlobal(object):
def __init__(self, **kwargs):
self.kwargs = kwargs
def __enter__(self):
self.empty = object()
self.old = dict(((key, globals().get(key, self.empty)) for key in self.kwargs))
globals().update(self.kwargs)
def __exit__(self, *args):
for k, v in self.old.items():
if v is self.empty:
globals().pop(k, None)
else:
globals()[k] = v
@_vim
def _with(key, *args, **kwargs):
if key == 'buffer':
return _WithNewBuffer(_edit, *args, **kwargs)
elif key == 'bufname':
return _WithBufName(*args, **kwargs)
elif key == 'mode':
return _WithMode(*args, **kwargs)
elif key == 'bufoptions':
return _WithBufOption(**kwargs)
elif key == 'options':
return _WithDict(options, **kwargs)
elif key == 'globals':
return _WithDict(vars, **kwargs)
elif key == 'wvars':
return _WithDict(current.window.vars, **kwargs)
elif key == 'environ':
return _WithDict(_environ, **kwargs)
elif key == 'split':
return _WithSplit()
elif key == 'tabpage':
return _WithNewTabPage(*args, **kwargs)
elif key == 'vpos':
return _WithGlobal(_v_pos=[0, kwargs['line'], kwargs['col'], kwargs['off']])
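# Minimal usage sketch (illustrative, not from the original file): test code is
# expected to drive the emulator through _with() as a context manager, e.g.
#
#   with _with('bufname', '/tmp/foo.txt'):
#       pass  # current buffer temporarily renamed
#   with _with('mode', 'i') as segment_info:
#       pass  # insert mode active; segment_info describes the current state
#
# Each key maps to one of the _With* helpers defined above, whose __exit__
# restores the previous state.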
class error(Exception):
pass
| mit |
fbradyirl/home-assistant | tests/components/light/test_device_automation.py | 1 | 5092 | """The test for light device automation."""
import pytest
from homeassistant.components import light
from homeassistant.const import STATE_ON, STATE_OFF, CONF_PLATFORM
from homeassistant.setup import async_setup_component
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import (
async_get_device_automation_triggers,
)
from homeassistant.helpers import device_registry
from tests.common import (
MockConfigEntry,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock serivce."""
return async_mock_service(hass, "test", "automation")
def _same_triggers(a, b):
if len(a) != len(b):
return False
for d in a:
if d not in b:
return False
return True
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a light."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create("light", "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": "light",
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
{
"platform": "device",
"domain": "light",
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": "light.test_5678",
},
]
triggers = await async_get_device_automation_triggers(hass, device_entry.id)
assert _same_triggers(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
platform = getattr(hass.components, "test.light")
platform.init()
assert await async_setup_component(
hass, light.DOMAIN, {light.DOMAIN: {CONF_PLATFORM: "test"}}
)
dev1, dev2, dev3 = platform.DEVICES
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": light.DOMAIN,
"entity_id": dev1.entity_id,
"type": "turn_on",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_on {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": light.DOMAIN,
"entity_id": dev1.entity_id,
"type": "turn_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "turn_off {{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"for",
)
)
},
},
},
]
},
)
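    # Clarifying note (added; not in the original test): the join/%-formatting
    # above expands each data_template to roughly
    #   "turn_on {{ trigger.platform}} - {{ trigger.entity_id}} -
    #    {{ trigger.from_state.state}} - {{ trigger.to_state.state}} - {{ trigger.for }}"
    # which is what the rendered strings asserted below are compared against.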
await hass.async_block_till_done()
assert hass.states.get(dev1.entity_id).state == STATE_ON
assert len(calls) == 0
hass.states.async_set(dev1.entity_id, STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "turn_off state - {} - on - off - None".format(
dev1.entity_id
)
hass.states.async_set(dev1.entity_id, STATE_ON)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "turn_on state - {} - off - on - None".format(
dev1.entity_id
)
| apache-2.0 |
ArcherSys/ArcherSys | eclipse/plugins/org.python.pydev_4.5.5.201603221110/pysrc/third_party/pep8/lib2to3/lib2to3/pgen2/driver.py | 212 | 5164 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Parser driver.
This provides a high-level interface to parse a file into a syntax tree.
"""
__author__ = "Guido van Rossum <[email protected]>"
__all__ = ["Driver", "load_grammar"]
# Python imports
import codecs
import os
import logging
import StringIO
import sys
# Pgen imports
from . import grammar, parse, token, tokenize, pgen
class Driver(object):
def __init__(self, grammar, convert=None, logger=None):
self.grammar = grammar
if logger is None:
logger = logging.getLogger()
self.logger = logger
self.convert = convert
def parse_tokens(self, tokens, debug=False):
"""Parse a series of tokens and return the syntax tree."""
# XXX Move the prefix computation into a wrapper around tokenize.
p = parse.Parser(self.grammar, self.convert)
p.setup()
lineno = 1
column = 0
type = value = start = end = line_text = None
prefix = u""
for quintuple in tokens:
type, value, start, end, line_text = quintuple
if start != (lineno, column):
assert (lineno, column) <= start, ((lineno, column), start)
s_lineno, s_column = start
if lineno < s_lineno:
prefix += "\n" * (s_lineno - lineno)
lineno = s_lineno
column = 0
if column < s_column:
prefix += line_text[column:s_column]
column = s_column
if type in (tokenize.COMMENT, tokenize.NL):
prefix += value
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
continue
if type == token.OP:
type = grammar.opmap[value]
if debug:
self.logger.debug("%s %r (prefix=%r)",
token.tok_name[type], value, prefix)
if p.addtoken(type, value, (prefix, start)):
if debug:
self.logger.debug("Stop.")
break
prefix = ""
lineno, column = end
if value.endswith("\n"):
lineno += 1
column = 0
else:
# We never broke out -- EOF is too soon (how can this happen???)
raise parse.ParseError("incomplete input",
type, value, (prefix, start))
return p.rootnode
def parse_stream_raw(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
tokens = tokenize.generate_tokens(stream.readline)
return self.parse_tokens(tokens, debug)
def parse_stream(self, stream, debug=False):
"""Parse a stream and return the syntax tree."""
return self.parse_stream_raw(stream, debug)
def parse_file(self, filename, encoding=None, debug=False):
"""Parse a file and return the syntax tree."""
stream = codecs.open(filename, "r", encoding)
try:
return self.parse_stream(stream, debug)
finally:
stream.close()
def parse_string(self, text, debug=False):
"""Parse a string and return the syntax tree."""
tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline)
return self.parse_tokens(tokens, debug)
def load_grammar(gt="Grammar.txt", gp=None,
save=True, force=False, logger=None):
"""Load the grammar (maybe from a pickle)."""
if logger is None:
logger = logging.getLogger()
if gp is None:
head, tail = os.path.splitext(gt)
if tail == ".txt":
tail = ""
gp = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
if force or not _newer(gp, gt):
logger.info("Generating grammar tables from %s", gt)
g = pgen.generate_grammar(gt)
if save:
logger.info("Writing grammar tables to %s", gp)
try:
g.dump(gp)
except IOError, e:
logger.info("Writing failed:"+str(e))
else:
g = grammar.Grammar()
g.load(gp)
return g
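# Example usage (illustrative only; "Grammar.txt" is simply the default grammar
# file name used above):
#
#   g = load_grammar("Grammar.txt")       # build or unpickle the grammar tables
#   drv = Driver(g)
#   tree = drv.parse_string("x = 1\n")    # root node of the resulting syntax tree
#
# parse_file() and parse_stream() follow the same pattern but read their tokens
# from a file or stream instead of an in-memory string.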
def _newer(a, b):
"""Inquire whether file a was written since file b."""
if not os.path.exists(a):
return False
if not os.path.exists(b):
return True
return os.path.getmtime(a) >= os.path.getmtime(b)
def main(*args):
"""Main program, when run as a script: produce grammar pickle files.
Calls load_grammar for each argument, a path to a grammar text file.
"""
if not args:
args = sys.argv[1:]
logging.basicConfig(level=logging.INFO, stream=sys.stdout,
format='%(message)s')
for gt in args:
load_grammar(gt, save=True, force=True)
return True
if __name__ == "__main__":
sys.exit(int(not main()))
| mit |
treesnail/tushare | test/macro_test.py | 40 | 1185 | # -*- coding:utf-8 -*-
'''
Created on 2015/3/14
@author: Jimmy Liu
'''
import unittest
import tushare.stock.macro as fd
class Test(unittest.TestCase):
def test_get_gdp_year(self):
print(fd.get_gdp_year())
def test_get_gdp_quarter(self):
print(fd.get_gdp_quarter())
def test_get_gdp_for(self):
print(fd.get_gdp_for())
def test_get_gdp_pull(self):
print(fd.get_gdp_pull())
def test_get_gdp_contrib(self):
print(fd.get_gdp_contrib())
def test_get_cpi(self):
print(fd.get_cpi())
def test_get_ppi(self):
print(fd.get_ppi())
def test_get_deposit_rate(self):
print(fd.get_deposit_rate())
def test_get_loan_rate(self):
print(fd.get_loan_rate())
def test_get_rrr(self):
print(fd.get_rrr())
def test_get_money_supply(self):
print(fd.get_money_supply())
def test_get_money_supply_bal(self):
print(fd.get_money_supply_bal())
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | bsd-3-clause |
timthelion/FreeCAD | src/Mod/Path/PathScripts/PathLoadTool.py | 2 | 11946 | # -*- coding: utf-8 -*-
# ***************************************************************************
# * *
# * Copyright (c) 2015 Dan Falck <[email protected]> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
''' Tool Controller defines tool, spindle speed and feed rates for Path Operations '''
import FreeCAD
import FreeCADGui
import Path
# import PathGui
import PathScripts
import PathUtils
# from PathScripts import PathProject
from PySide import QtCore, QtGui
# Qt translation handling
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def translate(context, text, disambig=None):
return QtGui.QApplication.translate(context, text, disambig)
class LoadTool:
def __init__(self, obj):
obj.addProperty("App::PropertyIntegerConstraint", "ToolNumber", "Tool", "The active tool")
obj.ToolNumber = (0, 0, 10000, 1)
obj.addProperty("App::PropertyFloat", "SpindleSpeed", "Tool", "The speed of the cutting spindle in RPM")
obj.addProperty("App::PropertyEnumeration", "SpindleDir", "Tool", "Direction of spindle rotation")
obj.SpindleDir = ['Forward', 'Reverse']
obj.addProperty("App::PropertySpeed", "VertFeed", "Feed", "Feed rate for vertical moves in Z")
obj.addProperty("App::PropertySpeed", "HorizFeed", "Feed", "Feed rate for horizontal moves")
obj.Proxy = self
mode = 2
obj.setEditorMode('Placement', mode)
def execute(self, obj):
# if obj.ToolNumber != 0:
tool = PathUtils.getTool(obj, obj.ToolNumber)
if tool is not None:
obj.Label = obj.Name + ": (" + tool.Name + ")"
else:
obj.Label = obj.Name + ": (UNDEFINED TOOL)"
commands = ""
commands = 'M6T'+str(obj.ToolNumber)+'\n'
if obj.SpindleDir == 'Forward':
commands += 'M3S' + str(obj.SpindleSpeed) + '\n'
else:
commands += 'M4S' + str(obj.SpindleSpeed) + '\n'
obj.Path = Path.Path(commands)
# obj.Label = "TC: Tool"+str(obj.ToolNumber)
def onChanged(self, obj, prop):
mode = 2
obj.setEditorMode('Placement', mode)
# if prop == "ToolNumber":
proj = PathUtils.findProj()
for g in proj.Group:
if not(isinstance(g.Proxy, PathScripts.PathLoadTool.LoadTool)):
g.touch()
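# Illustrative note (added; not part of the original source): for a controller
# with ToolNumber 3, SpindleSpeed 12000.0 and SpindleDir 'Forward', execute()
# above emits roughly
#   M6T3
#   M3S12000.0
# as the object's Path, i.e. a tool change followed by a spindle-start command.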
class _ViewProviderLoadTool:
def __init__(self, vobj):
vobj.Proxy = self
mode = 2
vobj.setEditorMode('LineWidth', mode)
vobj.setEditorMode('MarkerColor', mode)
vobj.setEditorMode('NormalColor', mode)
vobj.setEditorMode('ShowFirstRapid', mode)
vobj.setEditorMode('DisplayMode', mode)
vobj.setEditorMode('BoundingBox', mode)
vobj.setEditorMode('Selectable', mode)
vobj.setEditorMode('ShapeColor', mode)
vobj.setEditorMode('Transparency', mode)
vobj.setEditorMode('Visibility', mode)
def __getstate__(self):
return None
def __setstate__(self, state):
return None
def getIcon(self):
return ":/icons/Path-LoadTool.svg"
def onChanged(self, vobj, prop):
mode = 2
vobj.setEditorMode('LineWidth', mode)
vobj.setEditorMode('MarkerColor', mode)
vobj.setEditorMode('NormalColor', mode)
vobj.setEditorMode('ShowFirstRapid', mode)
vobj.setEditorMode('DisplayMode', mode)
vobj.setEditorMode('BoundingBox', mode)
vobj.setEditorMode('Selectable', mode)
def updateData(self, vobj, prop):
# this is executed when a property of the APP OBJECT changes
pass
def setEdit(self, vobj, mode):
# this is executed when the object is double-clicked in the tree
FreeCADGui.Control.closeDialog()
taskd = TaskPanel()
taskd.obj = vobj.Object
FreeCADGui.Control.showDialog(taskd)
taskd.setupUi()
return True
def unsetEdit(self, vobj, mode):
# this is executed when the user cancels or terminates edit mode
pass
class CommandPathLoadTool:
def GetResources(self):
return {'Pixmap': 'Path-LoadTool',
'MenuText': QtCore.QT_TRANSLATE_NOOP("Path_LoadTool", "Add Tool Controller to the Project"),
'Accel': "P, T",
'ToolTip': QtCore.QT_TRANSLATE_NOOP("Path_LoadTool", "Add Tool Controller")}
def IsActive(self):
return FreeCAD.ActiveDocument is not None
def Activated(self):
FreeCAD.ActiveDocument.openTransaction(translate("Path_LoadTool", "Create Tool Controller Object"))
snippet = '''
import Path, PathScripts
from PathScripts import PathUtils, PathLoadTool
obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython","TC")
PathScripts.PathLoadTool.LoadTool(obj)
PathScripts.PathLoadTool._ViewProviderLoadTool(obj.ViewObject)
PathUtils.addToProject(obj)
'''
FreeCADGui.doCommand(snippet)
FreeCAD.ActiveDocument.commitTransaction()
FreeCAD.ActiveDocument.recompute()
@staticmethod
def Create():
# FreeCADGui.addModule("PathScripts.PathLoadTool")
# import Path
import PathScripts
import PathUtils
obj = FreeCAD.ActiveDocument.addObject("Path::FeaturePython", "TC")
PathScripts.PathLoadTool.LoadTool(obj)
PathScripts.PathLoadTool._ViewProviderLoadTool(obj.ViewObject)
PathUtils.addToProject(obj)
class TaskPanel:
def __init__(self):
self.form = FreeCADGui.PySideUic.loadUi(":/panels/ToolControl.ui")
#self.form = FreeCADGui.PySideUic.loadUi(FreeCAD.getHomePath() + "Mod/Path/ToolControl.ui")
self.updating = False
def accept(self):
self.getFields()
FreeCADGui.ActiveDocument.resetEdit()
FreeCADGui.Control.closeDialog()
FreeCAD.ActiveDocument.recompute()
FreeCADGui.Selection.removeObserver(self.s)
def reject(self):
FreeCADGui.Control.closeDialog()
FreeCAD.ActiveDocument.recompute()
FreeCADGui.Selection.removeObserver(self.s)
def getFields(self):
if self.obj:
if hasattr(self.obj, "VertFeed"):
self.obj.Label = self.form.tcoName.text()
if hasattr(self.obj, "VertFeed"):
self.obj.VertFeed = self.form.vertFeed.text()
if hasattr(self.obj, "HorizFeed"):
self.obj.HorizFeed = self.form.horizFeed.text()
if hasattr(self.obj, "SpindleSpeed"):
self.obj.SpindleSpeed = self.form.spindleSpeed.value()
if hasattr(self.obj, "SpindleDir"):
self.obj.SpindleDir = str(self.form.cboSpindleDirection.currentText())
#if hasattr(self.obj, "ToolNumber"):
# self.obj.ToolNumber = self.form.ToolNumber.value()
self.obj.Proxy.execute(self.obj)
def setFields(self):
self.form.vertFeed.setText(str(self.obj.VertFeed.Value))
self.form.horizFeed.setText(str(self.obj.HorizFeed.Value))
self.form.spindleSpeed.setValue(self.obj.SpindleSpeed)
self.form.tcoName.setText(str(self.obj.Label))
index = self.form.cboSpindleDirection.findText(self.obj.SpindleDir, QtCore.Qt.MatchFixedString)
if index >= 0:
self.form.cboSpindleDirection.setCurrentIndex(index)
# Populate the tool list
mach = PathUtils.findMachine()
try:
tool = mach.Tooltable.Tools[self.obj.ToolNumber]
self.form.txtToolName.setText(tool.Name)
self.form.txtToolType.setText(tool.ToolType)
self.form.txtToolMaterial.setText(tool.Material)
self.form.txtToolDiameter.setText(str(tool.Diameter))
except:
self.form.txtToolName.setText("UNDEFINED")
self.form.txtToolType.setText("UNDEFINED")
self.form.txtToolMaterial.setText("UNDEFINED")
self.form.txtToolDiameter.setText("UNDEFINED")
# self.form.cboToolSelect.addItem(tool.Name)
# index = self.form.cboToolSelect.findText(self.obj.SpindleDir, QtCore.Qt.MatchFixedString)
# if index >= 0:
# self.form.cboSpindleDirection.setCurrentIndex(index)
def open(self):
self.s = SelObserver()
# install the function mode resident
FreeCADGui.Selection.addObserver(self.s)
def getStandardButtons(self):
return int(QtGui.QDialogButtonBox.Ok)
def edit(self, item, column):
if not self.updating:
self.resetObject()
def resetObject(self, remove=None):
"transfers the values from the widget to the object"
# loc = []
# h = []
# l = []
# a = []
# for i in range(self.form.tagTree.topLevelItemCount()):
# it = self.form.tagTree.findItems(
# str(i+1), QtCore.Qt.MatchExactly, 0)[0]
# if (remove is None) or (remove != i):
# if it.text(1):
# x = float(it.text(1).split()[0].rstrip(","))
# y = float(it.text(1).split()[1].rstrip(","))
# z = float(it.text(1).split()[2].rstrip(","))
# loc.append(Vector(x, y, z))
# else:
# loc.append(0.0)
# if it.text(2):
# h.append(float(it.text(2)))
# else:
# h.append(4.0)
# if it.text(3):
# l.append(float(it.text(3)))
# else:
# l.append(5.0)
# if it.text(4):
# a.append(float(it.text(4)))
# else:
# a.append(45.0)
# self.obj.locs = loc
# self.obj.heights = h
# self.obj.lengths = l
# self.obj.angles = a
# self.obj.touch()
FreeCAD.ActiveDocument.recompute()
def setupUi(self):
pass
# Connect Signals and Slots
# Base Controls
# self.form.baseList.itemSelectionChanged.connect(self.itemActivated)
self.setFields()
class SelObserver:
def __init__(self):
pass
def __del__(self):
pass
if FreeCAD.GuiUp:
# register the FreeCAD command
FreeCADGui.addCommand('Path_LoadTool', CommandPathLoadTool())
FreeCAD.Console.PrintLog("Loading PathLoadTool... done\n")
| lgpl-2.1 |
40223117cda/w17test | static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/draw.py | 603 | 6456 | from javascript import console
from browser import timer
import math
class Queue:
def __init__(self):
self._list=[]
def empty(self):
return len(self._list) == 0
def put(self, element):
self._list.append(element)
def get(self):
if len(self._list) == 0:
            raise IndexError('get from an empty Queue')
_element=self._list[0]
if len(self._list) == 1:
self._list=[]
else:
self._list=self._list[1:]
return _element
dm={}
def aaline(canvas, color, startpos, endpos, width, outline, blend=1):
#console.log("aaline")
if canvas not in dm:
dm[canvas]=DrawManager(canvas)
dm[canvas].process()
_dl=DrawLine(startpos[0], startpos[1], endpos[0], endpos[1], color,
width, outline, speed=10)
dm[canvas].add_line(_dl) #color, startpos, endpos, width, outline)
def aapolygon(canvas, color, coordinates, width, outline, blend=1):
#console.log("aapolygon")
if canvas not in dm:
dm[canvas]=DrawManager(canvas)
dm[canvas].process()
_dp=DrawPolygon(coordinates, color, width, outline, speed=10)
dm[canvas].add_polygon(_dp)
def aapolygon_bg(canvas, shape):
if canvas not in dm:
dm[canvas]=DrawManager(canvas)
dm[canvas].process()
dm[canvas].add_polygon_bg(shape)
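# Rough usage sketch (added for illustration; 'canvas' is assumed to be an
# HTML5 canvas element obtained through Brython, and the colour is assumed to
# be any value the 2d context accepts, such as a CSS colour string):
#
#   aaline(canvas, '#ff0000', (0, 0), (100, 50), 1, None)
#   aapolygon(canvas, '#0000ff', [(0, 0), (50, 0), (50, 50)], 1, None)
#
# Both calls lazily create one DrawManager per canvas and enqueue the shape;
# the manager's timer then replays the queued segments onto the 2d context.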
class DrawPolygon:
def __init__(self, coordinates, color, width, outline, speed=10):
self.moveTo=coordinates[0]
self.segments=coordinates[1:]
self.color=color
self.width=width
self.outline=outline
class DrawLine:
def __init__(self, x0, y0, x1, y1, color, width, outline, speed=None):
self._type='LINE'
self._x0=x0
self._x1=x1
self._y0=y0
self._y1=y1
self._speed=speed
self._color=color
self._width=width
self._outline=outline
def get_segments(self):
if self._speed==0: #no animate since speed is 0 (return one segment)
return [{'type': self._type, 'x0':self._x0, 'y0': self._y0,
'x1': self._x1, 'y1': self._y1, 'color': self._color}]
#need to figure out how to translate speed into pixels, etc
#maybe speed is pixels per ms? 10 = 10 pixels per millisecond?
_x=(self._x1 - self._x0)
_x*=_x
_y=(self._y1 - self._y0)
_y*=_y
_distance=math.sqrt(_x + _y)
if _distance < self._speed: # we can do this in one segment
return [{'type': self._type, 'x0':self._x0, 'y0': self._y0,
'x1': self._x1, 'y1': self._y1, 'color': self._color}]
_segments=[]
_num_segments=math.floor(_distance/self._speed)
_pos_x=self._x0
_pos_y=self._y0
_x_diff=self._x1 - self._x0
_y_diff=self._y1 - self._y0
for _i in range(1,_num_segments+1):
_x=self._x0 + _i/_num_segments * _x_diff
_y=self._y0 + _i/_num_segments * _y_diff
            _segments.append({'type': 'LINE', 'x0': _pos_x, 'y0': _pos_y,
                              'x1': _x, 'y1': _y, 'color': self._color})
            _pos_x=_x
            _pos_y=_y
        if _pos_x != self._x1 or _pos_y != self._y1:
            # close any remaining gap so the last segment ends exactly at the endpoint
            _segments.append({'type': 'LINE', 'x0': _pos_x, 'y0': _pos_y,
                              'x1': self._x1, 'y1': self._y1, 'color': self._color})
return _segments
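# Worked example (illustrative, not from the original source): a DrawLine from
# (0, 0) to (100, 0) with speed=10 has distance 100 px, so get_segments()
# returns 10 segments of 10 px each: (0,0)->(10,0), (10,0)->(20,0), ...,
# (90,0)->(100,0). The DrawManager below then draws one segment per timer
# tick, which is what animates the line.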
class DrawManager:
def __init__(self, canvas):
self._queue=Queue()
self._canvas=canvas
self._ctx=canvas.getContext('2d')
self._interval=None
self._bg=None #used to capture bg before polygon is drawn
def __del__(self):
if self._interval is not None:
timer.clear_Interval(self._interval)
self._interval=None
del self._queue
def rect_from_shape(self, points):
_width=self._canvas.width
_height=self._canvas.height
_min_x=_width
_max_x=0
_min_y=_height
_max_y=0
for _point in points:
_x, _y = _point
_min_x=min(_min_x, _x)
_min_y=min(_min_y, _y)
_max_x=max(_max_x, _x)
_max_y=max(_max_y, _y)
_w2=_width/2
_h2=_height/2
return math.floor(_min_x-0.5)+_w2, math.floor(_min_y-0.5+_h2), \
math.ceil(_max_x+0.5)+_w2, math.ceil(_max_y+0.5+_h2)
def __interval(self):
if not self._queue.empty():
_dict=self._queue.get()
if _dict['type'] == 'LINE':
self._ctx.beginPath()
self._ctx.moveTo(_dict['x0'], _dict['y0'])
self._ctx.lineTo(_dict['x1'], _dict['y1'])
#if _dict['outline'] is not None:
# self._ctx.strokeStyle=_dict['outline'] #set line color
if _dict['color'] is not None:
                    # stroke() takes its colour from strokeStyle, not fillStyle
                    self._ctx.strokeStyle=_dict['color']
self._ctx.stroke()
elif _dict['type'] == 'POLYGON':
if self._bg is not None:
self._ctx.putImageData(self._bg[0], self._bg[1], self._bg[2])
console.log(self._bg[0])
self._bg=None
self._ctx.beginPath()
_moveTo=_dict['moveTo']
self._ctx.moveTo(_moveTo[0], _moveTo[1])
for _segment in _dict['segments']:
self._ctx.lineTo(_segment[0], _segment[1])
if _dict['width']:
self._ctx.lineWidth=_dict['width']
if _dict['outline']:
self._ctx.strokeStyle=_dict['outline']
if _dict['color']:
self._ctx.fillStyle=_dict['color']
self._ctx.fill()
self._ctx.closePath()
self._ctx.stroke()
elif _dict['type'] == 'POLYGON_BG':
_x0,_y0,_x1,_y1=self.rect_from_shape(_dict['shape'])
console.log(_x0,_y0,_x1, _y1)
self._bg=[]
self._bg.append(self._ctx.getImageData(_x0,_y0,abs(_x1)-abs(_x0),abs(_y1)-abs(_y0)))
self._bg.append(_x0)
self._bg.append(_y0)
def process(self):
self._interval=timer.set_interval(self.__interval, 10)
def add_line(self, dl): #color, startpos, endpos, width, outline, speed=None):
for _segment in dl.get_segments():
self._queue.put(_segment)
def add_polygon(self, dp):
self._queue.put({'type': 'POLYGON', 'moveTo': dp.moveTo,
'segments': dp.segments, 'color': dp.color,
'outline': dp.outline, 'width': dp.width})
def add_polygon_bg(self, shape):
self._queue.put({'type': 'POLYGON_BG', 'shape': shape})
| gpl-3.0 |
SiccarPoint/landlab | landlab/grid/structured_quad/links.py | 1 | 83060 | import numpy as np
from . import nodes
from ..base import CORE_NODE, FIXED_GRADIENT_BOUNDARY, FIXED_VALUE_BOUNDARY
from ..unstructured.links import LinkGrid
from ...core.utils import as_id_array
def neighbors_at_link(shape, links):
"""Get neighbor links.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.structured_quad.links import neighbors_at_link
>>> neighbors_at_link((3, 2), np.arange(7)) # doctest: +NORMALIZE_WHITESPACE
array([[-1, 3, -1, -1],
[ 2, 4, -1, -1], [-1, 5, 1, -1],
[-1, 6, -1, 0],
[ 5, 7, -1, 1], [-1, -1, 4, 2],
[-1, -1, -1, 3]])
"""
from .cfuncs import _neighbors_at_link
links = np.asarray(links, dtype=int)
out = np.full((links.size, 4), -1, dtype=int)
_neighbors_at_link(links, shape, out)
return out
def shape_of_vertical_links(shape):
"""Shape of vertical link grid.
Number of rows and columns of *vertical* links that connect nodes in a
structured grid of quadrilaterals.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
tuple of int :
Shape of the vertical links in grid.
Examples
--------
>>> from landlab.grid.structured_quad.links import shape_of_vertical_links
>>> shape_of_vertical_links((3, 4))
(2, 4)
"""
return (shape[0] - 1, shape[1])
def shape_of_horizontal_links(shape):
"""Shape of horizontal link grid.
Number of rows and columns of *horizontal* links that connect nodes in a
structured grid of quadrilaterals.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
tuple of int :
Shape of the horizontal links in grid.
Examples
--------
>>> from landlab.grid.structured_quad.links import (
... shape_of_horizontal_links)
>>> shape_of_horizontal_links((3, 4))
(3, 3)
"""
return (shape[0], shape[1] - 1)
def number_of_vertical_links(shape):
"""Number of vertical links in a structured quad grid.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
int :
Number of vertical links in grid.
Examples
--------
>>> from landlab.grid.structured_quad.links import number_of_vertical_links
>>> number_of_vertical_links((3, 4))
8
"""
return np.prod(shape_of_vertical_links(shape))
def number_of_horizontal_links(shape):
"""Number of horizontal links in a structured quad grid.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
int :
Number of horizontal links in grid.
Examples
--------
>>> from landlab.grid.structured_quad.links import (
... number_of_horizontal_links)
>>> number_of_horizontal_links((3, 4))
9
"""
return np.prod(shape_of_horizontal_links(shape))
def number_of_links(shape):
"""Number of links in a structured quad grid.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
int :
Number of links in grid.
Examples
--------
>>> from landlab.grid.structured_quad.links import number_of_links
>>> number_of_links((3, 4))
17
"""
return number_of_vertical_links(shape) + number_of_horizontal_links(shape)
def vertical_link_ids(shape):
"""Vertical links in a structured quad grid.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
(M, N) ndarray :
Array of link IDs.
Examples
--------
>>> from landlab.grid.structured_quad.links import vertical_link_ids
>>> vertical_link_ids((3, 4))
array([[ 3, 4, 5, 6],
[10, 11, 12, 13]])
"""
#link_ids = np.arange(number_of_vertical_links(shape), dtype=np.int)
#return link_ids.reshape(shape_of_vertical_links(shape))
a = shape[1] - 1 # num horiz links in each row
num_links_per_row = 2*shape[1] - 1 # each row has C-1 horiz + C vert
link_ids = np.zeros(shape_of_vertical_links(shape), dtype=np.int)
for r in range(shape[0]-1): # num rows - 1
link_ids[r,:] = a + (r * num_links_per_row) + np.arange(shape[1])
return link_ids
def horizontal_link_ids(shape):
"""Horizontal links in a structured quad grid.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
(M, N) ndarray :
Array of link IDs.
Examples
--------
>>> from landlab.grid.structured_quad.links import horizontal_link_ids
>>> horizontal_link_ids((3, 4))
array([[ 0, 1, 2],
[ 7, 8, 9],
[14, 15, 16]])
"""
num_links_per_row = 2*shape[1] - 1 # each row has C-1 horiz + C vert
link_ids = np.zeros(shape_of_horizontal_links(shape), dtype=np.int)
for r in range(shape[0]): # number of rows
link_ids[r,:] = (r * num_links_per_row) + np.arange(shape[1]-1)
return link_ids
def number_of_links_per_node(shape):
"""Number of links touching each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
ndarray :
Array of number of links per node.
Examples
--------
>>> from landlab.grid.structured_quad.links import (
... number_of_links_per_node, number_of_in_links_per_node,
... number_of_out_links_per_node)
>>> number_of_links_per_node((3, 4))
array([[2, 3, 3, 2],
[3, 4, 4, 3],
[2, 3, 3, 2]])
>>> (number_of_in_links_per_node((3, 4)) +
... number_of_out_links_per_node((3, 4)))
array([[2, 3, 3, 2],
[3, 4, 4, 3],
[2, 3, 3, 2]])
"""
link_count = np.empty(shape, np.int)
link_count[1:-1, 1:-1] = 4
link_count[(0, -1), 1:-1] = 3
link_count[1:-1, (0, -1)] = 3
link_count[(0, 0, -1, -1), (0, -1, 0, -1)] = 2
return link_count
def number_of_in_links_per_node(shape):
"""Number of links entering each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
ndarray :
Array of number of in-links per node.
Examples
--------
>>> from landlab.grid.structured_quad.links import (
... number_of_in_links_per_node)
>>> number_of_in_links_per_node((3, 4))
array([[0, 1, 1, 1],
[1, 2, 2, 2],
[1, 2, 2, 2]])
"""
link_count = np.empty(shape, np.int)
link_count[1:, 1:] = 2
link_count[0, 0] = 0
link_count[0, 1:] = 1
link_count[1:, 0] = 1
return link_count
def number_of_out_links_per_node(shape):
"""Number of links leaving each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
ndarray :
Array of number of out-links per node.
Examples
--------
>>> from landlab.grid.structured_quad.links import (
... number_of_out_links_per_node)
>>> number_of_out_links_per_node((3, 4))
array([[2, 2, 2, 1],
[2, 2, 2, 1],
[1, 1, 1, 0]])
"""
link_count = np.empty(shape, np.int)
link_count[:-1, :-1] = 2
link_count[-1, -1] = 0
link_count[-1, :-1] = 1
link_count[:-1, -1] = 1
return link_count
def _node_out_link_ids(shape):
"""Links leaving each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
tuple :
Tuple of array of link IDs as (vertical_links, horizontal_links).
Examples
--------
>>> from landlab.grid.structured_quad.links import _node_out_link_ids
>>> (vert, horiz) = _node_out_link_ids((3, 4))
>>> vert
array([[ 3, 4, 5, 6],
[10, 11, 12, 13],
[-1, -1, -1, -1]])
>>> horiz
array([[ 0, 1, 2, -1],
[ 7, 8, 9, -1],
[14, 15, 16, -1]])
"""
node_horizontal_link_ids = np.empty(shape, np.int)
node_horizontal_link_ids[:, :-1] = horizontal_link_ids(shape)
node_horizontal_link_ids[:, -1] = -1
node_vertical_link_ids = np.empty(shape, np.int)
node_vertical_link_ids[:-1, :] = vertical_link_ids(shape)
node_vertical_link_ids[-1, :] = -1
return node_vertical_link_ids, node_horizontal_link_ids
def _node_in_link_ids(shape):
"""Links entering each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
tuple :
Tuple of array of link IDs as (vertical_links, horizontal_links).
Examples
--------
>>> from landlab.grid.structured_quad.links import _node_in_link_ids
>>> (vert, horiz) = _node_in_link_ids((3, 4))
>>> vert
array([[-1, -1, -1, -1],
[ 3, 4, 5, 6],
[10, 11, 12, 13]])
>>> horiz
array([[-1, 0, 1, 2],
[-1, 7, 8, 9],
[-1, 14, 15, 16]])
"""
node_horizontal_link_ids = np.empty(shape, np.int)
node_horizontal_link_ids[:, 1:] = horizontal_link_ids(shape)
node_horizontal_link_ids[:, 0] = -1
node_vertical_link_ids = np.empty(shape, np.int)
node_vertical_link_ids[1:, :] = vertical_link_ids(shape)
node_vertical_link_ids[0, :] = -1
return node_vertical_link_ids, node_horizontal_link_ids
def node_in_link_ids(shape):
"""Links entering each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
tuple :
Tuple of array of link IDs as (vertical_links, horizontal_links).
Examples
--------
>>> from landlab.grid.structured_quad.links import node_in_link_ids
>>> (links, offset) = node_in_link_ids((3, 4))
>>> links
array([ 0, 1, 2, 3, 4, 7, 5, 8, 6, 9, 10, 11, 14, 12, 15, 13, 16])
>>> offset
array([ 0, 0, 1, 2, 3, 4, 6, 8, 10, 11, 13, 15, 17])
The links entering the 1st, 5th, and last node. The first node does not
have any links entering it.
>>> offset[0] == offset[1]
True
>>> for link in [4, 11]: links[offset[link]:offset[link + 1]]
array([3])
array([13, 16])
"""
(in_vert, in_horiz) = _node_in_link_ids(shape)
_node_link_ids = np.vstack((in_vert.flat, in_horiz.flat)).T
# offset = np.cumsum(number_of_in_links_per_node(shape))
offset = np.empty(nodes.number_of_nodes(shape) + 1, dtype=int)
np.cumsum(number_of_in_links_per_node(shape), out=offset[1:])
offset[0] = 0
return _node_link_ids[_node_link_ids >= 0], offset
def node_out_link_ids(shape):
"""Links leaving each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
tuple :
Tuple of array of link IDs as (vertical_links, horizontal_links).
Examples
--------
>>> from landlab.grid.structured_quad.links import node_out_link_ids
>>> (links, offset) = node_out_link_ids((3, 4))
>>> links
array([ 3, 0, 4, 1, 5, 2, 6, 10, 7, 11, 8, 12, 9, 13, 14, 15, 16])
>>> offset
array([ 0, 2, 4, 6, 7, 9, 11, 13, 14, 15, 16, 17, 17])
The links leaving the 1st, 8th, and last node. The last node does not have
any links leaving it.
>>> offset[11] == offset[12]
True
>>> for link in [0, 7]: links[offset[link]:offset[link + 1]]
array([3, 0])
array([13])
"""
(out_vert, out_horiz) = _node_out_link_ids(shape)
_node_link_ids = np.vstack((out_vert.flat, out_horiz.flat)).T
offset = np.empty(nodes.number_of_nodes(shape) + 1, dtype=int)
np.cumsum(number_of_out_links_per_node(shape), out=offset[1:])
offset[0] = 0
return _node_link_ids[_node_link_ids >= 0], offset
def links_at_node(shape):
"""Get link ids for each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
(N, 4) ndarray of int
Array of link ids.
Examples
--------
>>> from landlab.grid.structured_quad.links import links_at_node
>>> links_at_node((4, 3)) # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 2, -1, -1], [ 1, 3, 0, -1], [-1, 4, 1, -1],
[ 5, 7, -1, 2], [ 6, 8, 5, 3], [-1, 9, 6, 4],
[10, 12, -1, 7], [11, 13, 10, 8], [-1, 14, 11, 9],
[15, -1, -1, 12], [16, -1, 15, 13], [-1, -1, 16, 14]])
"""
(south_links, west_links) = _node_in_link_ids(shape)
(north_links, east_links) = _node_out_link_ids(shape)
return np.vstack((east_links.flat, north_links.flat,
west_links.flat, south_links.flat)).transpose().copy()
def node_link_ids(shape):
"""Link IDs for links entering and leaving each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
tuple :
Tuple of array of link IDs and offsets into link array.
Examples
--------
>>> from landlab.grid.structured_quad.links import node_link_ids
>>> (links, offset) = node_link_ids((3, 4))
>>> links
array([ 0, 3, 1, 4, 0, 2, 5, 1, 6, 2,
7, 10, 3, 8, 11, 7, 4, 9, 12, 8, 5, 13, 9, 6,
14, 10, 15, 14, 11, 16, 15, 12, 16, 13])
>>> offset
array([ 0, 2, 5, 8, 10, 13, 17, 21, 24, 26, 29, 32, 34])
The links attached to node 0
>>> links[offset[0]:offset[1]]
array([0, 3])
The links attached to node 5
>>> links[offset[5]:offset[6]]
array([ 8, 11, 7, 4])
"""
(in_vert, in_horiz) = _node_in_link_ids(shape)
(out_vert, out_horiz) = _node_out_link_ids(shape)
_node_link_ids = np.vstack((out_horiz.flat, out_vert.flat,
in_horiz.flat, in_vert.flat)).T
offset = np.empty(nodes.number_of_nodes(shape) + 1, dtype=int)
np.cumsum(number_of_links_per_node(shape), out=offset[1:])
offset[0] = 0
return _node_link_ids[_node_link_ids >= 0], offset
def node_id_at_link_start(shape):
"""Node ID at start of links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
ndarray :
Node IDs at start of links.
Examples
--------
>>> from landlab.grid.structured_quad.links import node_id_at_link_start
>>> node_id_at_link_start((3, 4)) # doctest: +NORMALIZE_WHITESPACE
array([ 0, 1, 2,
0, 1, 2, 3,
4, 5, 6,
4, 5, 6, 7,
8, 9, 10])
"""
all_node_ids = nodes.node_ids(shape)
link_tails_with_extra_row = np.hstack((all_node_ids[:, :-1],
all_node_ids)).reshape((-1, ))
return link_tails_with_extra_row[:-shape[1]]
def node_id_at_link_end(shape):
"""Node ID at end of links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
ndarray :
Node IDs at end of links.
Examples
--------
>>> from landlab.grid.structured_quad.links import node_id_at_link_end
>>> node_id_at_link_end((3, 4)) # doctest: +NORMALIZE_WHITESPACE
array([ 1, 2, 3,
4, 5, 6, 7,
5, 6, 7,
8, 9, 10, 11,
9, 10, 11])
"""
all_node_ids = nodes.node_ids(shape)
link_heads_missing_row = np.hstack((all_node_ids[:-1, 1:],
all_node_ids[1:, :])).reshape((-1, ))
return np.concatenate((link_heads_missing_row, all_node_ids[-1, 1:]))
def is_active_link(shape, node_status):
"""Link IDs of active links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
node_status : array_link
Status of nodes in grid.
Returns
-------
    ndarray of bool
        `True` for links that are active.
Examples
--------
>>> from landlab.grid.structured_quad.nodes import (
... status_with_perimeter_as_boundary)
>>> from landlab.grid.structured_quad.links import is_active_link
>>> status = status_with_perimeter_as_boundary((3, 4))
>>> status # doctest: +NORMALIZE_WHITESPACE
array([[4, 4, 4, 4],
[4, 0, 0, 4],
[4, 4, 4, 4]])
>>> is_active_link((3, 4), status) # doctest: +NORMALIZE_WHITESPACE
array([False, False, False,
False, False, False, False,
False, True, False,
False, False, False, False,
False, False, False], dtype=bool)
"""
if np.prod(shape) != node_status.size:
raise ValueError('node status array does not match size of grid '
'(%d != %d)' % (np.prod(shape), len(node_status)))
status_at_link_start = node_status.flat[node_id_at_link_start(shape)]
status_at_link_end = node_status.flat[node_id_at_link_end(shape)]
return (((status_at_link_start == CORE_NODE) &
(status_at_link_end == CORE_NODE)) |
((status_at_link_end == CORE_NODE) &
(status_at_link_start == CORE_NODE)) |
((status_at_link_end == CORE_NODE) &
(status_at_link_start == FIXED_VALUE_BOUNDARY)) |
((status_at_link_end == FIXED_VALUE_BOUNDARY) &
(status_at_link_start == CORE_NODE)))
def active_link_ids(shape, node_status):
"""Get active links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
node_status : array_link
Status of nodes in grid.
Returns
-------
ndarray :
        Link IDs of the active links.
Examples
--------
>>> from landlab.grid import RasterModelGrid
>>> from landlab.grid.structured_quad.links import active_link_ids
>>> rmg = RasterModelGrid((3, 4))
>>> rmg.set_closed_boundaries_at_grid_edges(True, True, True, True)
>>> status = rmg.status_at_node
>>> status # doctest: +NORMALIZE_WHITESPACE
array([4, 4, 4, 4,
4, 0, 0, 4,
4, 4, 4, 4], dtype=int8)
>>> active_link_ids((3, 4), status)
array([8])
"""
return as_id_array(np.where(is_active_link(shape, node_status))[0])
def is_fixed_link(shape, node_status):
"""ID of active links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
node_status : array_link
Status of nodes in grid.
Returns
-------
    ndarray of bool
        `True` for links that are fixed.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import is_fixed_link
>>> import numpy as np
>>> rmg = RasterModelGrid((4, 5))
>>> z = np.arange(0, rmg.number_of_nodes)
>>> s = np.arange(0, rmg.number_of_links)
>>> rmg.at_node['topographic__elevation'] = z
>>> rmg.at_link['topographic__slope'] = s
>>> rmg.set_fixed_link_boundaries_at_grid_edges(True, True, True, True)
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([2, 2, 2, 2, 2,
2, 0, 0, 0, 2,
2, 0, 0, 0, 2,
2, 2, 2, 2, 2], dtype=int8)
>>> is_fixed_link(rmg.shape, rmg.status_at_node)
array([False, False, False, False, False, True, True, True, False,
True, False, False, True, False, False, False, False, False,
True, False, False, True, False, True, True, True, False,
False, False, False, False], dtype=bool)
"""
if np.prod(shape) != node_status.size:
raise ValueError('node status array does not match size of grid '
'(%d != %d)' % (np.prod(shape), len(node_status)))
status_at_link_start = node_status.flat[node_id_at_link_start(shape)]
status_at_link_end = node_status.flat[node_id_at_link_end(shape)]
return (((status_at_link_start == CORE_NODE) &
(status_at_link_end == FIXED_GRADIENT_BOUNDARY)) |
((status_at_link_end == CORE_NODE) &
(status_at_link_start == FIXED_GRADIENT_BOUNDARY)))
def fixed_link_ids(shape, node_status):
"""ID of fixed links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
node_status : array_link
Status of nodes in grid.
Returns
-------
ndarray :
        Link IDs of the fixed links.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import fixed_link_ids
>>> import numpy as np
>>> rmg = RasterModelGrid(4, 5)
>>> z = np.arange(0, rmg.number_of_nodes)
>>> s = np.arange(0, rmg.number_of_links)
>>> rmg.at_node['topographic__elevation'] = z
>>> rmg.at_link['topographic__slope'] = s
>>> rmg.set_fixed_link_boundaries_at_grid_edges(True, True, True, True)
>>> rmg.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([2, 2, 2, 2, 2,
2, 0, 0, 0, 2,
2, 0, 0, 0, 2,
2, 2, 2, 2, 2], dtype=int8)
>>> fixed_link_ids(rmg.shape, rmg.status_at_node)
array([ 5, 6, 7, 9, 12, 18, 21, 23, 24, 25])
"""
return as_id_array(np.where(is_fixed_link(shape, node_status))[0])
def horizontal_active_link_ids(shape, active_ids, bad_index_value=-1):
"""ID of horizontal active links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
active_ids : array of int
Array of all active link ids
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs at the HORIZONTAL active links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*---I-->*---I-->*---I-->*---I-->*
^ ^ ^ ^ ^
I I I I I
| | | | |
*---I-->o--24-->o--25-->o---I-->*
^ ^ ^ ^ ^
I V V V I
| | | | |
*---I-->o--20-->o--21-->o---I-->*
^ ^ ^ ^ ^
I I I I I
| | | | |
*---I-->*---I-->*---I-->*---I-->*
.. note::
``*`` indicates the nodes that are set to :any:`CLOSED_BOUNDARY`
``o`` indicates the nodes that are set to :any:`CORE_NODE`
``I`` indicates the links that are set to :any:`INACTIVE_LINK`
``V`` indicates vertical active ids, which are ignored by this
function.
Numeric values correspond to the horizontal :any:`ACTIVE_LINK` ID.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import (active_link_ids,
... horizontal_active_link_ids)
>>> rmg = RasterModelGrid(4, 5)
>>> rmg.set_closed_boundaries_at_grid_edges(True, True, True, True)
>>> status = rmg.status_at_node
>>> status # doctest: +NORMALIZE_WHITESPACE
array([4, 4, 4, 4, 4,
4, 0, 0, 0, 4,
4, 0, 0, 0, 4,
4, 4, 4, 4, 4], dtype=int8)
>>> active_ids = active_link_ids((4,5), status)
>>> horizontal_active_link_ids((4,5), active_ids)
... # doctest: +NORMALIZE_WHITESPACE
array([-1, -1, -1, -1,
-1, 10, 11, -1,
-1, 19, 20, -1,
-1, -1, -1, -1])
"""
out = np.full(number_of_horizontal_links(shape), bad_index_value,
dtype=int)
horizontal_ids = active_ids[np.where(~ is_vertical_link(shape, active_ids))]
out[nth_horizontal_link(shape, horizontal_ids)] = horizontal_ids
return out
def horizontal_fixed_link_ids(shape, fixed_ids, bad_index_value=-1):
"""ID of horizontal fixed links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
fixed_ids : array of int
Array of all fixed link ids
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs at the HORIZONTAL fixed links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*---I-->*---I-->*---I-->*---I-->*
^ ^ ^ ^ ^
I V V V I
| | | | |
*--18-->o------>o------>o--21-->*
^ ^ ^ ^ ^
I V V V I
| | | | |
*---9-->o------>o------>o--12-->*
^ ^ ^ ^ ^
I V V V I
| | | | |
*---I-->*---I-->*---I-->*---I-->*
.. note::
``*`` indicates the nodes that are set to :any:`FIXED_VALUE_BOUNDARY`
``o`` indicates the nodes that are set to :any:`CORE_NODE`
``I`` indicates the links that are set to :any:`INACTIVE_LINK`
``V`` indicates vertical ids, which are ignored by this function
``H`` indicates horizontal :any:`ACTIVE_LINK` ids, which are ignored by
this function
Numeric values correspond to the horizontal :any:`FIXED_LINK` ID.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import (fixed_link_ids,
... horizontal_fixed_link_ids)
>>> import numpy
>>> rmg = RasterModelGrid(4, 5)
>>> rmg.at_node['topographic__elevation'] = numpy.arange(
... 0, rmg.number_of_nodes)
>>> rmg.at_link['topographic__slope'] = numpy.arange(
... 0, rmg.number_of_links)
>>> rmg.set_fixed_link_boundaries_at_grid_edges(True, True, True, True)
>>> status = rmg.status_at_node
>>> status # doctest: +NORMALIZE_WHITESPACE
array([2, 2, 2, 2, 2,
2, 0, 0, 0, 2,
2, 0, 0, 0, 2,
2, 2, 2, 2, 2], dtype=int8)
>>> fixed_ids = fixed_link_ids((4, 5), status)
>>> horizontal_fixed_link_ids((4, 5), fixed_ids)
... # doctest: +NORMALIZE_WHITESPACE
array([-1, -1, -1, -1,
9, -1, -1, 12,
18, -1, -1, 21,
-1, -1, -1, -1])
"""
out = np.full(number_of_horizontal_links(shape), bad_index_value,
dtype=int)
horizontal_ids = fixed_ids[np.where(~ is_vertical_link(shape, fixed_ids))]
out[nth_horizontal_link(shape, horizontal_ids)] = horizontal_ids
return out
def is_vertical_link(shape, links):
"""Test if links are vertical.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
links : array of int
Array of link ids to test.
Returns
-------
ndarray of bool
`True` for links that are vertical.
Examples
--------
>>> from landlab.grid.structured_quad.links import (is_vertical_link,
... number_of_links)
>>> import numpy as np
>>> shape = (3, 4)
>>> links = np.arange(number_of_links(shape))
>>> is_vertical_link(shape, links) # doctest: +NORMALIZE_WHITESPACE
array([False, False, False, True, True, True, True,
False, False, False, True, True, True, True,
False, False, False], dtype=bool)
"""
return (((links % (2 * shape[1] - 1)) >= shape[1] - 1) &
(links < number_of_links(shape)))
def is_horizontal_link(shape, links):
"""Test if a link is horizontal.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
links : array of int
Array of link ids to test.
Returns
-------
ndarray of bool
`True` for links that are horizontal.
Examples
--------
>>> from landlab.grid.structured_quad.links import (is_horizontal_link,
... number_of_links)
>>> import numpy as np
>>> shape = (3, 4)
>>> links = np.arange(number_of_links(shape))
>>> is_horizontal_link(shape, links) # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, False, False,
True, True, True, False, False, False, False,
True, True, True], dtype=bool)
"""
return ((~ is_vertical_link(shape, links)) &
(links < number_of_links(shape)))
def is_diagonal_link(shape, links):
"""Test if a link is diagonal.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
links : array of int
Array of link ids to test.
Returns
-------
ndarray of bool
`True` for links that are diagonal.
Examples
--------
>>> from landlab.grid.structured_quad.links import (is_diagonal_link,
... number_of_links)
>>> import numpy as np
>>> shape = (3, 4)
>>> links = np.array([0, 3, 16, 17])
>>> is_diagonal_link(shape, links) # doctest: +NORMALIZE_WHITESPACE
array([False, False, False, True], dtype=bool)
"""
return links >= number_of_links(shape)
def nth_vertical_link(shape, links):
"""Convert link ID to vertical link ID.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
links : array of int
Array of link ids to test.
Returns
-------
ndarray of int
The link ID as the nth vertical links.
Examples
--------
>>> from landlab.grid.structured_quad.links import nth_vertical_link
>>> shape = (3, 4)
>>> nth_vertical_link(shape, 4)
1
>>> nth_vertical_link(shape, (3, 4, 11))
array([0, 1, 5])
"""
links = np.asarray(links, dtype=np.int)
return as_id_array((links // (2 * shape[1] - 1)) * shape[1] +
links % (2 * shape[1] - 1) - (shape[1] - 1))
def nth_horizontal_link(shape, links):
"""Convert link ID to horizontal link ID.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
links : array of int
Array of link ids to test.
Returns
-------
ndarray of int
The link ID as the nth horizontal links.
Examples
--------
>>> from landlab.grid.structured_quad.links import nth_horizontal_link
>>> shape = (3, 4)
>>> nth_horizontal_link(shape, 16)
8
>>> nth_horizontal_link(shape, (1, 7, 8))
array([1, 3, 4])
"""
links = np.asarray(links, dtype=np.int)
return as_id_array((links // (2 * shape[1] - 1)) * (shape[1] - 1) +
links % (2 * shape[1] - 1))
def vertical_active_link_ids(shape, active_ids, bad_index_value=-1):
"""ID of vertical active links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
active_ids : array of int
Array of all active link ids
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs at the VERTICAL active links. Length of
number_of_vertical_links.
Examples
--------
The following example uses this grid::
*---I-->*---I-->*---I-->*---I-->*
^ ^ ^ ^ ^
I I I I I
| | | | |
*---I-->o---H-->o---H-->o---I-->*
^ ^ ^ ^ ^
I 6 7 8 I
| | | | |
*---I-->o---H-->o---H-->o---I-->*
^ ^ ^ ^ ^
I I I I I
| | | | |
*---I-->*---I-->*---I-->*---I-->*
.. note::
``*`` indicates the nodes that are set to :any:`CLOSED_BOUNDARY`
``o`` indicates the nodes that are set to :any:`CORE_NODE`
``I`` indicates the links that are set to :any:`INACTIVE_LINK`
``H`` indicates horizontal active ids, which are ignored by this
function
Numeric values correspond to the vertical :any:`ACTIVE_LINK` IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import (active_link_ids,
... vertical_active_link_ids)
>>> rmg = RasterModelGrid((4, 5))
>>> active_ids = active_link_ids((4, 5), rmg.status_at_node)
>>> active_ids # doctest: +NORMALIZE_WHITESPACE
array([ 5, 6, 7,
9, 10, 11, 12,
14, 15, 16,
18, 19, 20, 21,
23, 24, 25])
>>> vertical_active_link_ids((4, 5), active_ids)
... # doctest: +NORMALIZE_WHITESPACE
array([-1, 5, 6, 7, -1,
-1, 14, 15, 16, -1,
-1, 23, 24, 25, -1])
>>> rmg.set_closed_boundaries_at_grid_edges(True, True, True, True)
>>> status = rmg.status_at_node
>>> active_ids = active_link_ids((4, 5), status)
>>> vertical_active_link_ids((4, 5), active_ids)
... # doctest: +NORMALIZE_WHITESPACE
array([-1, -1, -1, -1, -1,
-1, 14, 15, 16, -1,
-1, -1, -1, -1, -1])
"""
out = np.full(number_of_vertical_links(shape), bad_index_value, dtype=int)
vertical_ids = active_ids[np.where(is_vertical_link(shape, active_ids))]
out[nth_vertical_link(shape, vertical_ids)] = vertical_ids
return out
def vertical_fixed_link_ids(shape, fixed_ids, bad_index_value=-1):
"""ID of vertical fixed links.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
fixed_ids : array of int
Array of all fixed link ids
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs at the VERTICAL fixed links. Length of
number_of_vertical_links.
Examples
--------
The following example uses this grid::
*---I-->*---I-->*---I-->*---I-->*
^ ^ ^ ^ ^
I 23 24 25 I
| | | | |
*---H-->o---H-->o---H-->o---H-->*
^ ^ ^ ^ ^
I V V V I
| | | | |
*---H-->o---H-->o---H-->o---H-->*
^ ^ ^ ^ ^
I 5 6 7 I
| | | | |
*---I-->*---I-->*---I-->*---I-->*
.. note::
``*`` indicates the nodes that are set to
:any:`FIXED_GRADIENT_BOUNDARY`
``o`` indicates the nodes that are set to :any:`CORE_NODE`
``I`` indicates the links that are set to :any:`INACTIVE_LINK`
``H`` indicates horizontal active and fixed links, which are ignored by
this function.
``V`` indicates vertical active ids, which are ignored by this
function.
Numeric values correspond to the vertical :any:`FIXED_LINK` IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import (fixed_link_ids,
... vertical_fixed_link_ids)
>>> import numpy
>>> rmg = RasterModelGrid((4, 5))
>>> rmg.at_node['topographic__elevation'] = numpy.arange(
... 0, rmg.number_of_nodes)
>>> rmg.at_link['topographic__slope'] = numpy.arange(
... 0, rmg.number_of_links)
>>> rmg.set_fixed_link_boundaries_at_grid_edges(True, True, True, True)
>>> status = rmg.status_at_node
>>> status # doctest: +NORMALIZE_WHITESPACE
array([2, 2, 2, 2, 2,
2, 0, 0, 0, 2,
2, 0, 0, 0, 2,
2, 2, 2, 2, 2], dtype=int8)
>>> fixed_ids = fixed_link_ids((4, 5), status)
>>> vertical_fixed_link_ids((4,5), fixed_ids)
... # doctest: +NORMALIZE_WHITESPACE
array([-1, 5, 6, 7, -1,
-1, -1, -1, -1, -1,
-1, 23, 24, 25, -1])
"""
out = np.full(number_of_vertical_links(shape), bad_index_value, dtype=int)
vertical_ids = fixed_ids[np.where(is_vertical_link(shape, fixed_ids))]
out[nth_vertical_link(shape, vertical_ids)] = vertical_ids
return out
def horizontal_south_link_neighbor(shape, horizontal_ids,
bad_index_value=-1):
"""ID of south horizontal link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids *must be of len(horizontal_links)*.
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of south horizontal neighbor links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import *
>>> rmg = RasterModelGrid(4, 5)
>>> horizontal_links = horizontal_link_ids(rmg.shape).flatten()
>>> horizontal_south_link_neighbor(rmg.shape, horizontal_links)
array([-1, -1, -1, -1, 0, 1, 2, 3, 9, 10, 11, 12, 18, 19, 20, 21])
"""
# First, we find the shape of the horizontal link array given the shape
# of the raster model grid. In our example, the shape of horizontal links
# for a grid of 4 rows and 5 columns is 4 rows of horizontal links and 4
# columns of horizontal links.
horizontal_2d_shape = shape_of_horizontal_links(shape)
    # Then, we reshape the flattened (1-D) horizontal_link_id array into the
# shape provided by the shape_of_horizontal_links() function.
horizontal_2d_array = np.reshape(horizontal_ids, horizontal_2d_shape)
# To find south links, we need to shift the IDs in the 2-D array. We first
# insert a row of bad_index_value into the top row of the array
horizontal_ids = np.insert(horizontal_2d_array, [0], bad_index_value,
axis=0)
# We find the updated array shape and number of rows for the updated array.
row_len = np.shape(horizontal_ids)[0]
# To get back to the correct array size (the one found using
# shape_of_horizontal_links), we delete the last row in the 2-D array
link_ids = np.delete(horizontal_ids, [row_len - 1], axis=0)
# Once we have shifted the 2-D array and removed extra indices, we can
# flatten the output array to a 1-D array with length of
# number_of_horizontal_links.
south_horizontal_neighbors = link_ids.flatten()
return south_horizontal_neighbors
def horizontal_west_link_neighbor(shape, horizontal_ids,
bad_index_value=-1):
"""ID of west, horizontal link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids - *must be of len(horizontal_links)*
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of west horizontal neighbor links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import *
>>> rmg = RasterModelGrid(4, 5)
>>> horizontal_links = horizontal_link_ids(rmg.shape).flatten()
>>> horizontal_west_link_neighbor(rmg.shape, horizontal_links)
array([-1, 0, 1, 2, -1, 9, 10, 11, -1, 18, 19, 20, -1, 27, 28, 29])
"""
# First, we find the shape of the horizontal link array given the shape
# of the raster model grid. In our example, the shape of horizontal links
# for a grid of 4 rows and 5 columns is 4 rows of horizontal links and 4
# columns of horizontal links.
horizontal_2d_shape = shape_of_horizontal_links(shape)
    # Then, we reshape the flattened (1-D) horizontal_link_id array into the
# shape provided by the shape_of_horizontal_links() function.
horizontal_2d_array = np.reshape(horizontal_ids, horizontal_2d_shape)
# To find west links, we need to shift the IDs in the 2-D array. We insert
# a column of bad_index_value into the first column of the array.
horizontal_ids = np.insert(horizontal_2d_array, [0], bad_index_value,
axis=1)
# We find the updated array shape and number of columns for the updated
# array.
row_len = np.shape(horizontal_ids)[1]
# To get back to the correct array size (the one found using
# shape_of_horizontal_links), we delete the very LAST column of the 2-D
    # array. (Any link in the final column of the 2-D array cannot be a western
# neighbor anyway).
horizontal_ids = np.delete(horizontal_ids, [row_len - 1], axis=1)
# Once we have shifted the 2-D array and removed extra indices, we can
# flatten the output array to a 1-D array with length of
# number_of_horizontal_links.
west_horizontal_neighbors = horizontal_ids.flatten()
return west_horizontal_neighbors
def horizontal_north_link_neighbor(shape, horizontal_ids,
bad_index_value=-1):
"""ID of north, horizontal link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids - *must be of len(horizontal_links)*
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of north horizontal neighbor links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal :any:`ACTIVE_LINK` IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import *
>>> rmg = RasterModelGrid(4, 5)
>>> horizontal_links = horizontal_link_ids(rmg.shape).flatten()
>>> horizontal_north_link_neighbor(rmg.shape, horizontal_links)
array([ 9, 10, 11, 12, 18, 19, 20, 21, 27, 28, 29, 30, -1, -1, -1, -1])
"""
# First, we find the shape of the horizontal link array given the shape
# of the raster model grid. In our example, the shape of horizontal links
# for a grid of 4 rows and 5 columns is 4 rows of horizontal links and 4
# columns of horizontal links.
horizontal_2d_shape = shape_of_horizontal_links(shape)
    # Then, we reshape the flattened (1-D) horizontal_link_id array into the
# shape provided by the shape_of_horizontal_links() function.
horizontal_2d_array = np.reshape(horizontal_ids, horizontal_2d_shape)
# To find north links, we need to shift the IDs in the 2-D array. We first
# delete the top row of the array
horizontal_ids = np.delete(horizontal_2d_array, [0], axis=0)
# We find the updated array shape and number of rows for the updated array.
row_len = np.shape(horizontal_ids)[0]
# To get back to the correct array size (the one found using
# shape_of_horizontal_links), we insert a row (populated with
    # bad_index_value) into the end of the 2-D array.
link_ids = np.insert(horizontal_ids, [row_len], bad_index_value,
axis=0)
# Once we have shifted the 2-D array and removed extra indices, we can
# flatten the output array to a 1-D array with length of
# number_of_horizontal_links.
north_horizontal_neighbors = link_ids.flatten()
return north_horizontal_neighbors
def horizontal_east_link_neighbor(shape, horizontal_ids,
bad_index_value=-1):
"""IDs of east, horizontal link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids - *must be of len(horizontal_links)*
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of east horizontal neighbor links. Length of
number_of_horizontal_links.
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal :any:`ACTIVE_LINK` IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import *
>>> rmg = RasterModelGrid(4, 5)
>>> horizontal_links = horizontal_link_ids(rmg.shape).flatten()
>>> horizontal_east_link_neighbor(rmg.shape, horizontal_links)
array([ 1, 2, 3, -1, 10, 11, 12, -1, 19, 20, 21, -1, 28, 29, 30, -1])
"""
# First, we find the shape of the horizontal link array given the shape
# of the raster model grid. In our example, the shape of horizontal links
# for a grid of 4 rows and 5 columns is 4 rows of horizontal links and 4
# columns of horizontal links.
horizontal_2d_shape = shape_of_horizontal_links(shape)
    # Then, we reshape the flattened (1-D) horizontal_link_id array into the
# shape provided by the shape_of_horizontal_links() function.
horizontal_2d_array = np.reshape(horizontal_ids, horizontal_2d_shape)
    # To find east links, we need to shift the IDs in the 2-D array. We first
# delete the first column of the array (these values can never be east
# neighbors anyway.)
horizontal_ids = np.delete(horizontal_2d_array, [0], axis=1)
# We find the updated array shape and number of columns for the updated
# array.
row_len = np.shape(horizontal_ids)[1]
# To get back to the correct array size (the one found using
# shape_of_horizontal_links), we insert a column of bad_index_value into
# the last column spot in the 2-D array.
link_ids = np.insert(horizontal_ids, [row_len], bad_index_value,
axis=1)
# Once we have shifted the 2-D array and removed extra indices, we can
# flatten the output array to a 1-D array with length of
# number_of_horizontal_links.
east_horizontal_neighbors = link_ids.flatten()
return east_horizontal_neighbors
def d4_horizontal_link_neighbors(shape, horizontal_ids, bad_index_value=-1):
"""IDs of all 4 horizontal link neighbors.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids - *must be of len(horizontal_links)*
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Array of 4 horizontal link neighbors for a given link ID. Returned in
[E, N, W, S].
Examples
--------
Sample grid, giving neighbors for link ID 10::
*------>*------>*------>*------>*
*------>*--19-->*------>*------>*
*---9-->*--10-->*--11-->*------>*
*------>*---1-->*------>*------>*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import *
>>> rmg = RasterModelGrid(4, 5)
>>> horizontal_links = horizontal_link_ids(rmg.shape).flatten()
>>> d4_horizontal_link_neighbors(rmg.shape, horizontal_links)
array([[ 1, 9, -1, -1],
[ 2, 10, 0, -1],
[ 3, 11, 1, -1],
[-1, 12, 2, -1],
[10, 18, -1, 0],
[11, 19, 9, 1],
[12, 20, 10, 2],
[-1, 21, 11, 3],
[19, 27, -1, 9],
[20, 28, 18, 10],
[21, 29, 19, 11],
[-1, 30, 20, 12],
[28, -1, -1, 18],
[29, -1, 27, 19],
[30, -1, 28, 20],
[-1, -1, 29, 21]])
"""
# First we find *south* neighbors...
south = horizontal_south_link_neighbor(shape, horizontal_ids,
bad_index_value)
# Then *west* neighbors...
west = horizontal_west_link_neighbor(shape, horizontal_ids,
bad_index_value)
# Then *north* neighbors...
north = horizontal_north_link_neighbor(shape, horizontal_ids,
bad_index_value)
# Finally, *east* neighbors...
east = horizontal_east_link_neighbor(shape, horizontal_ids,
bad_index_value)
# Combine all 4 neighbor arrays into one large array
# (4 x len_horizontal_links)
neighbor_array = np.array([east, north, west, south])
# Transpose the 4 neighbor arrays into a (len_horizontal_links x 4) array.
neighbor_array = np.transpose(neighbor_array)
    # Output neighbor array. For each input ID, returns [E, N, W, S].
return neighbor_array
def d4_horizontal_active_link_neighbors(shape, horizontal_ids,
bad_index_value=-1):
"""returns IDs of all 4 horizontal link neighbors.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
horizontal_ids : array of int
Array of all horizontal link ids - *must be of len(horizontal_links)*
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray
Array of 4 horizontal link neighbors for a given link ID. Returned in
[E, N, W, S]. Returns array for only ACTIVE horizontal links.
Examples
--------
Sample grid, giving neighbors for link ID 20::
*------>*------>*------>*------>*
*------>*--19-->*--20-->*------>*
*------>*--10-->*--11-->*------>*
*------>*------>*------>*------>*
.. note::
Only horizontal links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the horizontal :any:`ACTIVE_LINK` IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import *
>>> rmg = RasterModelGrid((4, 5))
>>> rmg.set_closed_boundaries_at_grid_edges(True, True, True, True)
>>> active_ids = active_link_ids(rmg.shape, rmg.status_at_node)
>>> horizontal_ids = horizontal_active_link_ids(
... rmg.shape, active_ids)
>>> d4_horizontal_active_link_neighbors(rmg.shape, horizontal_ids)
array([[11, 19, -1, -1],
[-1, 20, 10, -1],
[20, -1, -1, 10],
[-1, -1, 19, 11]])
"""
    # To do this we simply call the d4_horizontal_link_neighbors() function
# which gives the neighbors for ALL horizontal links in an array, even
# inactive links.
d4_neigh = d4_horizontal_link_neighbors(shape, horizontal_ids,
bad_index_value)
# Now we will just focus on indices that are ACTIVE...
active_links = np.where(horizontal_ids != bad_index_value)
# Clip our initial array into a smaller one with just active neighbors
neighbor_array = d4_neigh[active_links]
    # Output neighbor array. For each input ID, returns [E, N, W, S].
return neighbor_array
def vertical_south_link_neighbor(shape, vertical_ids, bad_index_value=-1):
"""Link IDs of south, vertical link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
vertical_ids : array of int
Array of all vertical link ids - MUST BE ARRAY OF LEN(VERTICAL_LINKS)
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of *south* vertical neighbor links. Length of
number_of_vertical_links.
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import *
>>> rmg = RasterModelGrid((4, 5))
>>> vertical_links = vertical_link_ids(rmg.shape)
>>> vertical_south_link_neighbor(rmg.shape, vertical_links)
... # doctest: +NORMALIZE_WHITESPACE
array([-1, -1, -1, -1, -1,
4, 5, 6, 7, 8,
13, 14, 15, 16, 17])
"""
# First, we find the shape of the vertical link array given the shape
# of the raster model grid. In our example, the shape of vertical links for
# a grid of 4 rows and 5 columns is 3 rows of vertical links and 5 columns
# of vertical links.
vertical_2d_shape = shape_of_vertical_links(shape)
    # Then, we reshape the flattened (1-D) vertical_link_id array into the shape
# provided by the shape_of_vertical_links() function.
vertical_2d_array = np.reshape(vertical_ids, vertical_2d_shape)
# To find south links, we need to shift the IDs in the 2-D array. We insert
# a row of bad_index_value into the top row of the 2-D array
link_ids = np.insert(vertical_2d_array, [0], bad_index_value, axis=0)
# We find the updated array shape and number of rows for the updated array.
row_len = np.shape(link_ids)[0]
# To get back to the correct array size (the one found using
    # shape_of_vertical_links), we delete the last row of the 2-D array.
vertical_ids = np.delete(link_ids, [row_len - 1], axis=0)
# Once we have shifted the 2-D array and removed extra indices, we can
# flatten the output array to a 1-D array with length of
# number_of_vertical_links.
south_vertical_neighbors = vertical_ids.flatten()
return south_vertical_neighbors
def vertical_west_link_neighbor(shape, vertical_ids, bad_index_value=-1):
"""Link IDs of west, vertical link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
vertical_ids : array of int
        Array of all vertical link ids - MUST BE ARRAY OF LEN(VERTICAL_LINKS)
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of *west* vertical neighbor links. Length of
number_of_vertical_links.
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import *
>>> rmg = RasterModelGrid(4, 5)
>>> vertical_links = vertical_link_ids(rmg.shape)
>>> vertical_west_link_neighbor(rmg.shape, vertical_links)
... # doctest: +NORMALIZE_WHITESPACE
array([-1, 4, 5, 6, 7,
-1, 13, 14, 15, 16,
-1, 22, 23, 24, 25])
"""
# First, we find the shape of the vertical link array given the shape
# of the raster model grid. In our example, the shape of vertical links for
# a grid of 4 rows and 5 columns is 3 rows of vertical links and 5 columns
# of vertical links.
vertical_2d_shape = shape_of_vertical_links(shape)
    # Then, we reshape the flattened (1-D) vertical_link_id array into the shape
# provided by the shape_of_vertical_links() function.
vertical_2d_array = np.reshape(vertical_ids, vertical_2d_shape)
# To find west links, we need to shift the IDs in the 2-D array. We insert
# a column of bad_index_value into the first column of the array.
vertical_ids = np.insert(vertical_2d_array, [0], bad_index_value,
axis=1)
# We find the updated array shape and number of columns for the updated
# array.
row_len = np.shape(vertical_ids)[1]
# To get back to the correct array size (the one found using
# shape_of_vertical_links), we delete the very LAST column of the 2-D
    # array. (Any link in the final column of the 2-D array cannot be a western
# neighbor anyway).
vertical_ids = np.delete(vertical_ids, [row_len - 1], axis=1)
# Once we have shifted the 2-D array and removed extra indices, we can
# flatten the output array to a 1-D array with length of
# number_of_vertical_links.
west_vertical_neighbors = vertical_ids.flatten()
return west_vertical_neighbors
def vertical_north_link_neighbor(shape, vertical_ids, bad_index_value=-1):
"""Link IDs of north, vertical link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
vertical_ids : array of int
        Array of all vertical link ids - MUST BE ARRAY OF LEN(VERTICAL_LINKS)
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of *north* vertical neighbor links. Length of
number_of_vertical_links.
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import *
>>> rmg = RasterModelGrid(4, 5)
>>> vertical_ids = vertical_link_ids(rmg.shape)
>>> vertical_north_link_neighbor(rmg.shape, vertical_ids)
... # doctest: +NORMALIZE_WHITESPACE
array([13, 14, 15, 16, 17,
22, 23, 24, 25, 26,
-1, -1, -1, -1, -1])
"""
# First, we find the shape of the vertical link array given the shape
# of the raster model grid. In our example, the shape of vertical links for
# a grid of 4 rows and 5 columns is 3 rows of vertical links and 5 columns
# of vertical links.
vertical_2d_shape = shape_of_vertical_links(shape)
    # Then, we reshape the flattened (1-D) vertical_link_id array into the shape
# provided by the shape_of_vertical_links() function.
vertical_2d_array = np.reshape(vertical_ids, vertical_2d_shape)
# To find north links, we need to shift the IDs in the 2-D array. We first
# delete the first row of the array.
vertical_ids = np.delete(vertical_2d_array, [0], axis=0)
# We find the updated array shape and number of rows for the updated array.
row_len = np.shape(vertical_ids)[0]
# To get back to the correct array size (the one found using
# shape_of_vertical_links), we insert a row (populated with
# bad_index_value) into the end of the 2-D array.
link_ids = np.insert(vertical_ids, [row_len], bad_index_value,
axis=0)
# Once we have shifted the 2-D array and removed extra indices, we can
# flatten the output array to a 1-D array with length of
# number_of_vertical_links.
north_vertical_neighbors = link_ids.flatten()
return north_vertical_neighbors
def vertical_east_link_neighbor(shape, vertical_ids, bad_index_value=-1):
"""Link IDs of east, vertical link neighbor.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
vertical_ids : array of int
Array of all vertical link ids - MUST BE ARRAY OF LEN(VERTICAL_LINKS)
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Link IDs of *east* vertical neighbor links. Length of
number_of_vertical_links.
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import *
>>> rmg = RasterModelGrid(4, 5)
>>> vertical_links = vertical_link_ids(rmg.shape)
>>> vertical_east_link_neighbor(rmg.shape, vertical_links)
... # doctest: +NORMALIZE_WHITESPACE
array([ 5, 6, 7, 8, -1,
14, 15, 16, 17, -1,
23, 24, 25, 26, -1])
"""
# First, we find the shape of the vertical link array given the shape
# of the raster model grid. In our example, the shape of vertical links for
# a grid of 4 rows and 5 columns is 3 rows of vertical links and 5 columns
# of vertical links.
vertical_2d_shape = shape_of_vertical_links(shape)
    # Then, we reshape the flattened (1-D) vertical_link_id array into the shape
# provided by the shape_of_vertical_links() function.
vertical_2d_array = np.reshape(vertical_ids, vertical_2d_shape)
# To find east links, we need to shift the IDs in the 2-D array. We first
# delete the first column of the array.
vertical_ids = np.delete(vertical_2d_array, [0], axis=1)
# We find the updated array shape and number of columns for the updated
# array.
row_len = np.shape(vertical_ids)[1]
# To get back to the correct array size (the one found using
# shape_of_vertical_links), we insert a column (populated with
# bad_index_value) into the end of the 2-D array.
link_ids = np.insert(vertical_ids, [row_len], bad_index_value,
axis=1)
# Once we have shifted the 2-D array and removed extra indices, we can
# flatten the output array to a 1-D array with length of
# number_of_vertical_links.
east_vertical_neighbors = link_ids.flatten()
return east_vertical_neighbors
def d4_vertical_link_neighbors(shape, vertical_ids, bad_index_value=-1):
"""IDs of all 4 vertical link neighbors.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
vertical_ids : array of int
Array of all vertical link ids - MUST BE ARRAY OF LEN(VERTICAL_LINKS)
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Array of 4 vertical link neighbors for a given link ID. Returned in
[E, N, W, S].
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import *
>>> rmg = RasterModelGrid(4, 5)
>>> vertical_ids = vertical_link_ids(rmg.shape)
>>> d4_vertical_link_neighbors(rmg.shape, vertical_ids)
array([[ 5, 13, -1, -1],
[ 6, 14, 4, -1],
[ 7, 15, 5, -1],
[ 8, 16, 6, -1],
[-1, 17, 7, -1],
[14, 22, -1, 4],
[15, 23, 13, 5],
[16, 24, 14, 6],
[17, 25, 15, 7],
[-1, 26, 16, 8],
[23, -1, -1, 13],
[24, -1, 22, 14],
[25, -1, 23, 15],
[26, -1, 24, 16],
[-1, -1, 25, 17]])
"""
south = vertical_south_link_neighbor(shape, vertical_ids, bad_index_value)
west = vertical_west_link_neighbor(shape, vertical_ids, bad_index_value)
north = vertical_north_link_neighbor(shape, vertical_ids, bad_index_value)
east = vertical_east_link_neighbor(shape, vertical_ids, bad_index_value)
neighbor_array = np.array([east, north, west, south])
neighbor_array = np.transpose(neighbor_array)
return neighbor_array
def d4_vertical_active_link_neighbors(shape, vertical_ids, bad_index_value=-1):
"""IDs of all 4 vertical link neighbors.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
    vertical_ids : array of int
Array of all vertical link ids - *must be of len(vertical_links)*
bad_index_value: int, optional
        Value assigned to inactive indices in the array.
Returns
-------
ndarray :
Array of 4 vertical link neighbors for a given ACTIVE link ID.
Returned in [E, N, W, S].
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown. When no neighbor is found,
bad_index_value is returned.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import (active_link_ids,
... vertical_active_link_ids, d4_vertical_active_link_neighbors)
>>> rmg = RasterModelGrid(4, 5)
>>> active_link_ids = active_link_ids(rmg.shape, rmg.status_at_node)
>>> vertical_active_ids = vertical_active_link_ids(
... rmg.shape, active_link_ids)
>>> d4_vertical_active_link_neighbors(rmg.shape, vertical_active_ids)
array([[ 6, 14, -1, -1],
[ 7, 15, 5, -1],
[-1, 16, 6, -1],
[15, 23, -1, 5],
[16, 24, 14, 6],
[-1, 25, 15, 7],
[24, -1, -1, 14],
[25, -1, 23, 15],
[-1, -1, 24, 16]])
"""
    # To do this we simply call the d4_vertical_link_neighbors() function
# which gives the neighbors for ALL vertical links in an array, even
# inactive links.
d4_all_neighbors = d4_vertical_link_neighbors(shape, vertical_ids,
bad_index_value)
# Now we will just focus on indices that are ACTIVE...
active_links = np.where(vertical_ids != bad_index_value)
# Clip our initial array into a smaller one with just active neighbors
neighbor_array = d4_all_neighbors[active_links]
    # Output neighbor array. For each input ID, returns [E, N, W, S].
return neighbor_array
def bottom_edge_horizontal_ids(shape):
"""Link IDs of bottom edge horizontal links.
Parameters
----------
shape : tuple of int
Shape of grid, given as (rows, columns) of nodes.
Returns
-------
ndarray :
Link IDs of bottom edge horizontal links. Length is
(rmg.number_of_columns-1)
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown.
``*`` indicates nodes
Numeric values correspond to the horizontal IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import (
... bottom_edge_horizontal_ids)
>>> rmg = RasterModelGrid(4, 5)
>>> shape = rmg.shape
>>> bottom_edge_horizontal_ids(shape)
array([0, 1, 2, 3])
"""
# First, we find all horizontal link ids for the RasterModelGrid shape.
horizontal_id_array = horizontal_link_ids(shape)
    # Then we slice the first row and return it. This has our bottom edge
# horizontal ids. This array should be equal in length to (number of
# columns - 1)
bottom_edge_hori_ids = horizontal_id_array[0]
return bottom_edge_hori_ids
def left_edge_horizontal_ids(shape):
"""Link IDs of left edge horizontal links.
Parameters
----------
shape : tuple of int
Shape of grid, given as (rows, columns) of nodes.
Returns
-------
ndarray :
Link IDs of left edge horizontal links. Length is (rmg.number_of_rows)
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown.
``*`` indicates nodes
Numeric values correspond to the horizontal IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import left_edge_horizontal_ids
>>> rmg = RasterModelGrid(4, 5)
>>> shape = rmg.shape
>>> left_edge_horizontal_ids(shape)
array([ 0, 9, 18, 27])
"""
# First, we find all horizontal link ids for the RasterModelGrid shape.
horizontal_id_array = horizontal_link_ids(shape)
# Then we slice the first column and return it. This has our left edge
# horizontal ids. This array should be equal in length to (number of rows)
left_edge_hori_ids = horizontal_id_array[:, 0]
return left_edge_hori_ids
def top_edge_horizontal_ids(shape):
"""IDs of top edge horizontal links.
Parameters
----------
shape : tuple of int
Shape of grid, given as (rows, columns) of nodes.
Returns
-------
ndarray :
Link IDs of top edge horizontal links. Length is
(rmg.number_of_columns - 1)
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown.
``*`` indicates nodes
Numeric values correspond to the horizontal IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import top_edge_horizontal_ids
>>> rmg = RasterModelGrid(4, 5)
>>> shape = rmg.shape
>>> top_edge_horizontal_ids(shape)
array([27, 28, 29, 30])
"""
# First, we find all horizontal link ids for the RasterModelGrid shape.
horizontal_id_array = horizontal_link_ids(shape)
    # Then we slice the last row and return it. This has our top edge
# horizontal ids. This array should be equal in length to (number of
# columns - 1)
top_edge_hori_ids = horizontal_id_array[(shape[0] - 1)]
return top_edge_hori_ids
def right_edge_horizontal_ids(shape):
"""IDs of right edge horizontal links.
Parameters
----------
shape : tuple of int
Shape of grid, given as (rows, columns) of nodes.
Returns
-------
ndarray :
        Link IDs of right edge horizontal links. Length is (rmg.number_of_rows)
Examples
--------
The following example uses this grid::
*--27-->*--28-->*--29-->*--30-->*
*--18-->*--19-->*--20-->*--21-->*
*---9-->*--10-->*--11-->*--12-->*
*---0-->*---1-->*---2-->*---3-->*
.. note::
Only horizontal links are shown.
``*`` indicates nodes
Numeric values correspond to the horizontal IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import (
... right_edge_horizontal_ids)
>>> rmg = RasterModelGrid(4, 5)
>>> shape = rmg.shape
>>> right_edge_horizontal_ids(shape)
array([ 3, 12, 21, 30])
"""
# First, we find all horizontal link ids for the RasterModelGrid shape.
horizontal_id_array = horizontal_link_ids(shape)
# Then we slice the last column and return it. This has our right edge
# horizontal ids. This array should be equal in length to (number of
    # rows)
right_edge_hori_ids = horizontal_id_array[:, (shape[1] - 2)]
return right_edge_hori_ids
def bottom_edge_vertical_ids(shape):
"""Link IDs of bottom edge vertical links.
Parameters
----------
shape : tuple of int
Shape of grid, given as (rows, columns) of nodes.
Returns
-------
ndarray :
Link IDs of bottom edge vertical links. Length is
(rmg.number_of_columns)
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import bottom_edge_vertical_ids
>>> rmg = RasterModelGrid(4, 5)
>>> shape = rmg.shape
>>> bottom_edge_vertical_ids(shape)
array([4, 5, 6, 7, 8])
"""
# First, we find all vertical link ids for the RasterModelGrid shape.
vertical_id_array = vertical_link_ids(shape)
    # Then we slice the first row and return it. This has our bottom edge
# vertical ids. This array should be equal in length to (number of columns)
bottom_edge_vert_ids = vertical_id_array[0]
return bottom_edge_vert_ids
def left_edge_vertical_ids(shape):
"""Link IDs of left edge vertical links.
Parameters
----------
shape : tuple of int
Shape of grid, given as (rows, columns) of nodes.
Returns
-------
ndarray :
Link IDs of left edge vertical links. Length is
(rmg.number_of_rows - 1)
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import left_edge_vertical_ids
>>> rmg = RasterModelGrid(4, 5)
>>> shape = rmg.shape
>>> left_edge_vertical_ids(shape)
array([ 4, 13, 22])
"""
# First, we find all vertical link ids for the RasterModelGrid shape.
vertical_id_array = vertical_link_ids(shape)
# Then we slice the first column and return it. This has our left edge
# vertical ids. This array should be equal in length to
# (number of rows - 1)
left_edge_vert_ids = vertical_id_array[:, 0]
return left_edge_vert_ids
def top_edge_vertical_ids(shape):
"""Link IDs of top edge vertical links.
Parameters
----------
shape : tuple of int
Shape of grid, given as (rows, columns) of nodes.
Returns
-------
ndarray :
Link IDs of top edge vertical links. Length is (rmg.number_of_columns)
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import top_edge_vertical_ids
>>> rmg = RasterModelGrid(4, 5)
>>> shape = rmg.shape
>>> top_edge_vertical_ids(shape)
array([22, 23, 24, 25, 26])
"""
# First, we find all vertical link ids for the RasterModelGrid shape.
vertical_id_array = vertical_link_ids(shape)
    # Then we slice the last row and return it. This has our top edge
# vertical ids. This array should be equal in length to (number of columns)
top_edge_vert_ids = vertical_id_array[(shape[0] - 2)]
return top_edge_vert_ids
def right_edge_vertical_ids(shape):
"""Link IDs of right edge vertical links.
Parameters
----------
shape : tuple of int
Shape of grid, given as (rows, columns) of nodes.
Returns
-------
ndarray :
        Link IDs of right edge vertical links. Length is
(rmg.number_of_rows - 1)
Examples
--------
The following example uses this grid::
* * * * *
^ ^ ^ ^ ^
22 23 24 25 26
| | | | |
* * * * *
^ ^ ^ ^ ^
13 14 15 16 17
| | | | |
* * * * *
^ ^ ^ ^ ^
4 5 6 7 8
| | | | |
* * * * *
.. note::
Only vertical links are shown.
``*`` indicates nodes
Numeric values correspond to the vertical IDs.
>>> from landlab import RasterModelGrid
>>> from landlab.grid.structured_quad.links import right_edge_vertical_ids
>>> rmg = RasterModelGrid(4, 5)
>>> shape = rmg.shape
>>> right_edge_vertical_ids(shape)
array([ 8, 17, 26])
"""
# First, we find all vertical link ids for the RasterModelGrid shape.
vertical_id_array = vertical_link_ids(shape)
# Then we slice the last column and return it. This has our right edge
# vertical ids. This array should be equal in length to
# (number of rows - 1)
right_edge_vert_ids = vertical_id_array[:, (shape[1] - 1)]
return right_edge_vert_ids
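# A minimal sketch (illustrative only, not part of the original module): the
# *_edge_*_ids helpers above can be combined to collect every perimeter link
# of a grid. For a (4, 5) grid of nodes this gathers the bottom and top rows
# of horizontal links plus the left and right columns of vertical links:
#     perimeter_links = np.concatenate((bottom_edge_horizontal_ids((4, 5)),
#                                       top_edge_horizontal_ids((4, 5)),
#                                       left_edge_vertical_ids((4, 5)),
#                                       right_edge_vertical_ids((4, 5))))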
class StructuredQuadLinkGrid(LinkGrid):
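    """LinkGrid for a structured grid of quadrilaterals.
    Parameters
    ----------
    shape : tuple of int
        Shape of grid of nodes.
    """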
def __init__(self, shape):
link_ends = (node_id_at_link_start(shape), node_id_at_link_end(shape))
number_of_nodes = np.prod(shape)
LinkGrid.__init__(self, link_ends, number_of_nodes)
| mit |
medallia/aurora | src/main/python/apache/aurora/executor/common/announcer_zkauth_schema.py | 8 | 1155 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# checkstyle: noqa
from pystachio import Boolean, Default, List, Required, String, Struct
class Auth(Struct):
scheme = Required(String)
credential = Required(String)
class Permissions(Struct):
read = Default(Boolean, False)
write = Default(Boolean, False)
create = Default(Boolean, False)
delete = Default(Boolean, False)
admin = Default(Boolean, False)
class Access(Struct):
scheme = Required(String)
credential = Required(String)
permissions = Required(Permissions)
class ZkAuth(Struct):
auth = Default(List(Auth), [])
acl = Default(List(Access), [])
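# A minimal usage sketch (illustrative only; the scheme and credential values
# below are made-up placeholders, not part of this schema module):
#
#     zk_auth = ZkAuth(
#         auth=[Auth(scheme='digest', credential='user:password')],
#         acl=[Access(scheme='digest', credential='user:password',
#                     permissions=Permissions(read=True, write=True))])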
| apache-2.0 |
jsteemann/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/asynchat.py | 247 | 11402 | # -*- Mode: Python; tab-width: 4 -*-
# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <[email protected]>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
r"""A class supporting chat-style (command/response) protocols.
This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).
The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.
for example:
Say you build an async nntp client using this class. At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting. Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""
import socket
import asyncore
from collections import deque
from sys import py3kwarning
from warnings import filterwarnings, catch_warnings
class async_chat (asyncore.dispatcher):
"""This is an abstract class. You must derive from this class, and add
the two methods collect_incoming_data() and found_terminator()"""
# these are overridable defaults
ac_in_buffer_size = 4096
ac_out_buffer_size = 4096
def __init__ (self, sock=None, map=None):
# for string terminator matching
self.ac_in_buffer = ''
# we use a list here rather than cStringIO for a few reasons...
# del lst[:] is faster than sio.truncate(0)
# lst = [] is faster than sio.truncate(0)
# cStringIO will be gaining unicode support in py3k, which
# will negatively affect the performance of bytes compared to
# a ''.join() equivalent
self.incoming = []
# we toss the use of the "simple producer" and replace it with
# a pure deque, which the original fifo was a wrapping of
self.producer_fifo = deque()
asyncore.dispatcher.__init__ (self, sock, map)
def collect_incoming_data(self, data):
raise NotImplementedError("must be implemented in subclass")
def _collect_incoming_data(self, data):
self.incoming.append(data)
def _get_data(self):
d = ''.join(self.incoming)
del self.incoming[:]
return d
def found_terminator(self):
raise NotImplementedError("must be implemented in subclass")
def set_terminator (self, term):
"Set the input delimiter. Can be a fixed string of any length, an integer, or None"
self.terminator = term
def get_terminator (self):
return self.terminator
# grab some more data from the socket,
# throw it to the collector method,
# check for the terminator,
# if found, transition to the next state.
def handle_read (self):
try:
data = self.recv (self.ac_in_buffer_size)
except socket.error, why:
self.handle_error()
return
self.ac_in_buffer = self.ac_in_buffer + data
# Continue to search for self.terminator in self.ac_in_buffer,
# while calling self.collect_incoming_data. The while loop
# is necessary because we might read several data+terminator
# combos with a single recv(4096).
while self.ac_in_buffer:
lb = len(self.ac_in_buffer)
terminator = self.get_terminator()
if not terminator:
# no terminator, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
elif isinstance(terminator, int) or isinstance(terminator, long):
# numeric terminator
n = terminator
if lb < n:
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
self.terminator = self.terminator - lb
else:
self.collect_incoming_data (self.ac_in_buffer[:n])
self.ac_in_buffer = self.ac_in_buffer[n:]
self.terminator = 0
self.found_terminator()
else:
# 3 cases:
# 1) end of buffer matches terminator exactly:
# collect data, transition
# 2) end of buffer matches some prefix:
# collect data to the prefix
# 3) end of buffer does not match any prefix:
# collect data
terminator_len = len(terminator)
index = self.ac_in_buffer.find(terminator)
if index != -1:
# we found the terminator
if index > 0:
# don't bother reporting the empty string (source of subtle bugs)
self.collect_incoming_data (self.ac_in_buffer[:index])
self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
# This does the Right Thing if the terminator is changed here.
self.found_terminator()
else:
# check for a prefix of the terminator
index = find_prefix_at_end (self.ac_in_buffer, terminator)
if index:
if index != lb:
# we found a prefix, collect up to the prefix
self.collect_incoming_data (self.ac_in_buffer[:-index])
self.ac_in_buffer = self.ac_in_buffer[-index:]
break
else:
# no prefix, collect it all
self.collect_incoming_data (self.ac_in_buffer)
self.ac_in_buffer = ''
def handle_write (self):
self.initiate_send()
def handle_close (self):
self.close()
def push (self, data):
sabs = self.ac_out_buffer_size
if len(data) > sabs:
for i in xrange(0, len(data), sabs):
self.producer_fifo.append(data[i:i+sabs])
else:
self.producer_fifo.append(data)
self.initiate_send()
def push_with_producer (self, producer):
self.producer_fifo.append(producer)
self.initiate_send()
def readable (self):
"predicate for inclusion in the readable for select()"
# cannot use the old predicate, it violates the claim of the
# set_terminator method.
# return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
return 1
def writable (self):
"predicate for inclusion in the writable for select()"
return self.producer_fifo or (not self.connected)
def close_when_done (self):
"automatically close this channel once the outgoing queue is empty"
self.producer_fifo.append(None)
def initiate_send(self):
while self.producer_fifo and self.connected:
first = self.producer_fifo[0]
# handle empty string/buffer or None entry
if not first:
del self.producer_fifo[0]
if first is None:
self.handle_close()
return
# handle classic producer behavior
obs = self.ac_out_buffer_size
try:
with catch_warnings():
if py3kwarning:
filterwarnings("ignore", ".*buffer", DeprecationWarning)
data = buffer(first, 0, obs)
except TypeError:
data = first.more()
if data:
self.producer_fifo.appendleft(data)
else:
del self.producer_fifo[0]
continue
# send the data
try:
num_sent = self.send(data)
except socket.error:
self.handle_error()
return
if num_sent:
if num_sent < len(data) or obs < len(first):
self.producer_fifo[0] = first[num_sent:]
else:
del self.producer_fifo[0]
# we tried to send some actual data
return
def discard_buffers (self):
# Emergencies only!
self.ac_in_buffer = ''
del self.incoming[:]
self.producer_fifo.clear()
class simple_producer:
def __init__ (self, data, buffer_size=512):
self.data = data
self.buffer_size = buffer_size
def more (self):
if len (self.data) > self.buffer_size:
result = self.data[:self.buffer_size]
self.data = self.data[self.buffer_size:]
return result
else:
result = self.data
self.data = ''
return result
class fifo:
def __init__ (self, list=None):
if not list:
self.list = deque()
else:
self.list = deque(list)
def __len__ (self):
return len(self.list)
def is_empty (self):
return not self.list
def first (self):
return self.list[0]
def push (self, data):
self.list.append(data)
def pop (self):
if self.list:
return (1, self.list.popleft())
else:
return (0, None)
# Given 'haystack', see if any prefix of 'needle' is at its end. This
# assumes an exact match has already been checked. Return the number of
# characters matched.
# for example:
# f_p_a_e ("qwerty\r", "\r\n") => 1
# f_p_a_e ("qwertydkjf", "\r\n") => 0
# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# new python: 28961/s
# old python: 18307/s
# re: 12820/s
# regex: 14035/s
def find_prefix_at_end (haystack, needle):
l = len(needle) - 1
while l and not haystack.endswith(needle[:l]):
l -= 1
return l
| apache-2.0 |
12019/python-gsmmodem | examples/dial_polling_demo.py | 12 | 2624 | #!/usr/bin/env python
"""\
Demo: dial a number (simple example using polling to check call status)
Simple demo app that makes a voice call and plays sone DTMF tones (if supported by modem)
when the call is answered, and hangs up the call.
It polls the call status to see if the call has been answered
Note: you need to modify the NUMBER variable for this to work
"""
from __future__ import print_function
import sys, time, logging
PORT = '/dev/ttyUSB2'
BAUDRATE = 115200
NUMBER = '00000' # Number to dial - CHANGE THIS TO A REAL NUMBER
PIN = None # SIM card PIN (if any)
from gsmmodem.modem import GsmModem
from gsmmodem.exceptions import InterruptedException, CommandError
def main():
if NUMBER == None or NUMBER == '00000':
print('Error: Please change the NUMBER variable\'s value before running this example.')
sys.exit(1)
print('Initializing modem...')
#logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
modem = GsmModem(PORT, BAUDRATE)
modem.connect(PIN)
print('Waiting for network coverage...')
modem.waitForNetworkCoverage(30)
print('Dialing number: {0}'.format(NUMBER))
call = modem.dial(NUMBER)
print('Waiting for call to be answered/rejected')
wasAnswered = False
while call.active:
if call.answered:
wasAnswered = True
print('Call has been answered; waiting a while...')
# Wait for a bit - some older modems struggle to send DTMF tone immediately after answering a call
time.sleep(3.0)
print('Playing DTMF tones...')
try:
if call.active: # Call could have been ended by remote party while we waited in the time.sleep() call
call.sendDtmfTone('9515999955951')
except InterruptedException as e:
# Call was ended during playback
print('DTMF playback interrupted: {0} ({1} Error {2})'.format(e, e.cause.type, e.cause.code))
except CommandError as e:
print('DTMF playback failed: {0}'.format(e))
finally:
if call.active: # Call is still active
print('Hanging up call...')
call.hangup()
else: # Call is no longer active (remote party ended it)
print('Call has been ended by remote party')
else:
# Wait a bit and check again
time.sleep(0.5)
if not wasAnswered:
print('Call was not answered by remote party')
print('Done.')
modem.close()
if __name__ == '__main__':
main()
| lgpl-3.0 |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/lib/surface/pubsub/topics/list_subscriptions.py | 6 | 3464 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Cloud Pub/Sub topics list_subscriptions command."""
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.pubsub import util
from googlecloudsdk.core.resource import resource_printer_base
from googlecloudsdk.core.resource import resource_projector
class ListSubscriptions(base.ListCommand):
"""Lists Cloud Pub/Sub subscriptions from a given topic.
Lists all of the Cloud Pub/Sub subscriptions attached to the given topic and
that match the given filter.
"""
detailed_help = {
'EXAMPLES': """\
To filter results by subscription name
          (i.e. only show subscription 'mysubs'), run:
$ {command} --topic mytopic --filter=subscriptionId:mysubs
To combine multiple filters (with AND or OR), run:
$ {command} --topic mytopic --filter="subscriptionId:mysubs1 AND subscriptionId:mysubs2"
To filter subscriptions that match an expression:
$ {command} --topic mytopic --filter="subscriptionId:subs_*"
""",
}
@staticmethod
def Args(parser):
"""Register flags for this command."""
parser.add_argument(
'topic',
help=('The name of the topic to list subscriptions for.'))
def Run(self, args):
"""This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Yields:
      Subscription paths that match the regular expression in args.filter.
"""
msgs = self.context['pubsub_msgs']
pubsub = self.context['pubsub']
page_size = None
page_token = None
if args.page_size:
page_size = min(args.page_size, util.MAX_LIST_RESULTS)
if not args.filter and args.limit:
page_size = min(args.limit, page_size or util.MAX_LIST_RESULTS)
while True:
list_subscriptions_req = (
msgs.PubsubProjectsTopicsSubscriptionsListRequest(
topic=util.TopicFormat(args.topic),
pageSize=page_size,
pageToken=page_token))
list_subscriptions_result = pubsub.projects_topics_subscriptions.List(
list_subscriptions_req)
for subscription in list_subscriptions_result.subscriptions:
yield TopicSubscriptionDict(subscription)
page_token = list_subscriptions_result.nextPageToken
if not page_token:
break
yield resource_printer_base.PageMarker()
def TopicSubscriptionDict(topic_subscription):
"""Returns a topic_subscription dict with additional fields."""
result = resource_projector.MakeSerializable(
{'subscription': topic_subscription})
subscription_info = util.SubscriptionIdentifier(topic_subscription)
result['projectId'] = subscription_info.project.project_name
result['subscriptionId'] = subscription_info.resource_name
return result
| apache-2.0 |
devops2014/djangosite | tests/migrations/test_base.py | 25 | 2508 | import os
from django.db import connection
from django.test import TransactionTestCase
from django.utils._os import upath
class MigrationTestBase(TransactionTestCase):
"""
Contains an extended set of asserts for testing migrations and schema operations.
"""
available_apps = ["migrations"]
test_dir = os.path.abspath(os.path.dirname(upath(__file__)))
def get_table_description(self, table):
with connection.cursor() as cursor:
return connection.introspection.get_table_description(cursor, table)
def assertTableExists(self, table):
with connection.cursor() as cursor:
self.assertIn(table, connection.introspection.table_names(cursor))
def assertTableNotExists(self, table):
with connection.cursor() as cursor:
self.assertNotIn(table, connection.introspection.table_names(cursor))
def assertColumnExists(self, table, column):
self.assertIn(column, [c.name for c in self.get_table_description(table)])
def assertColumnNotExists(self, table, column):
self.assertNotIn(column, [c.name for c in self.get_table_description(table)])
def assertColumnNull(self, table, column):
self.assertEqual([c.null_ok for c in self.get_table_description(table) if c.name == column][0], True)
def assertColumnNotNull(self, table, column):
self.assertEqual([c.null_ok for c in self.get_table_description(table) if c.name == column][0], False)
def assertIndexExists(self, table, columns, value=True):
with connection.cursor() as cursor:
self.assertEqual(
value,
any(
c["index"]
for c in connection.introspection.get_constraints(cursor, table).values()
if c['columns'] == list(columns)
),
)
def assertIndexNotExists(self, table, columns):
return self.assertIndexExists(table, columns, False)
def assertFKExists(self, table, columns, to, value=True):
with connection.cursor() as cursor:
self.assertEqual(
value,
any(
c["foreign_key"] == to
for c in connection.introspection.get_constraints(cursor, table).values()
if c['columns'] == list(columns)
),
)
def assertFKNotExists(self, table, columns, to, value=True):
return self.assertFKExists(table, columns, to, False)
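# Illustrative usage in a subclass (hypothetical app/table names):
#   class MyMigrationTests(MigrationTestBase):
#       def test_create_model(self):
#           self.assertTableNotExists("migrations_pony")
#           ...  # apply the migration under test here
#           self.assertTableExists("migrations_pony")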
| bsd-3-clause |
Jai-Chaudhary/vislab | vislab/vw.py | 4 | 7157 | """
Train and test Vowpal Wabbit classifier or regressor on data.
Training includes cross-validation over parameters.
"""
import os
import shutil
import socket
import vislab
import vislab.results
import vislab.vw3
def test(
collection_name, dataset, source_dataset, feature_names,
force=False, num_workers=1, bit_precision=18, verbose=False):
print("{} running VW testing on {}, trained on {} for {}".format(
socket.gethostname(), dataset['name'],
source_dataset['name'], source_dataset['task']
))
print("Using {}".format(feature_names))
# To check for existing record of the final score, we construct a
# query document.
document = {
'features': feature_names,
'task': dataset['task'],
'quadratic': False,
}
# The salient parts include the type of data: 'rating', 'style', etc.
document.update(dataset['salient_parts'])
# The results are stored in a Mongo database called 'predict'.
client = vislab.util.get_mongodb_client()
collection = client['predict'][collection_name]
if not force:
result = collection.find_one(document)
if result is not None:
print("Already classified this, not doing anything.")
print(document)
print("(Score was {:.3f})".format(result['score_test']))
return
source_dirname = '{}/{}'.format(
vislab.config['paths']['predict_stable'],
source_dataset['dataset_name'])
# Run VW.
feat_dirname = \
vislab.config['paths']['feats'] + '/' + dataset['dataset_name']
vw = vislab.vw3.VW(
vislab.config['paths']['predict_temp'], dataset['dataset_name'],
num_workers, bit_precision
)
pred_df, test_score, val_score, train_score = vw.predict(
dataset, source_dataset, source_dirname, feature_names, feat_dirname,
force
)
# Write out results to filesystem.
results_name = '_'.join(
'{}_{}'.format(k, v) for k, v in sorted(document.iteritems()))
pred_df_filename = '{}/{}.h5'.format(
vislab.util.makedirs(vislab.config['paths']['results']),
results_name
)
pred_df.to_hdf(pred_df_filename, 'df', mode='w')
original_document = document.copy()
document.update({
'score_test': test_score,
'score_val': val_score,
'results_name': results_name
})
collection.update(original_document, document, upsert=True)
print("Final score: {:.3f}".format(test_score))
# No need to copy this to stable, since we didn't train any new models.
def train_and_test(
collection_name, dataset, feature_names,
force=False, num_workers=6,
num_passes=[10], loss=['logistic'], l1=[0], l2=[0],
quadratic='', bit_precision=18, verbose=False):
"""
Train and test using VW with the given features and quadratic
expansion.
Features are assumed to be stored in VW format in canonical location.
Cross-validates over all combinations of given parameter choices.
Parameters
----------
collection_name: string
Name of the MongoDB 'predict' database collection that contains
the prediction results.
dataset: dict
Contains name information and DataFrames for train, val,
and test splits.
feature_names: list of string
Features to use.
force: boolean [False]
    num_workers: int [6]
VW parameter tuning will run in parallel with this many workers,
on the same machine and reading from the same cache file.
    num_passes: list of int [[10]]
loss: list of string [['logistic']]
Acceptable choices are [
'hinge', 'squared', 'logistic', 'quantile'
]
    l1: list of float [[0]]
    l2: list of float [[0]]
quadratic: string ['']
If a non-empty string is given, it must be a sequence of single
        letters corresponding to the namespaces that will be crossed.
verbose: boolean [False]
"""
print("{} running VW on {} for {}".format(
socket.gethostname(), dataset['name'], dataset['task']))
print("Using {}, quadratic: {}".format(feature_names, quadratic))
# To check for existing record of the final score, we construct a
# query document.
document = {
'features': feature_names,
'task': dataset['task'],
'quadratic': quadratic
}
# The salient parts include the type of data: 'rating', 'style', etc.
document.update(dataset['salient_parts'])
# Right now, we write into a temporary directory, since we'll write
# large cache files. After we're done, we'll copy into a stable dir.
feat_dirname = \
vislab.config['paths']['feats'] + '/' + dataset['dataset_name']
vw = vislab.vw3.VW(
vislab.config['paths']['predict_temp'], dataset['dataset_name'],
num_workers, bit_precision, num_passes, loss, l1, l2, quadratic
)
# The results are stored in a Mongo database called 'predict'.
client = vislab.util.get_mongodb_client()
collection = client['predict'][collection_name]
if not force:
result = collection.find_one(document)
if result is not None:
print("Already classified this, and stable dir exists!")
print(document)
print("(Score was {:.3f})".format(result['score_test']))
return
# Run VW.
pred_df, test_score, val_score, train_score = vw.fit_and_predict(
dataset, feature_names, feat_dirname, force)
# Write out results to filesystem.
results_name = '_'.join(
'{}_{}'.format(k, v) for k, v in sorted(document.iteritems()))
pred_df_filename = '{}/{}.h5'.format(
vislab.util.makedirs(vislab.config['paths']['results']),
results_name
)
pred_df.to_hdf(pred_df_filename, 'df', mode='w')
original_document = document.copy()
document.update({
'score_test': test_score,
'score_val': val_score,
'results_name': results_name
})
collection.update(original_document, document, upsert=True)
print("Final score: {:.3f}".format(test_score))
# After we're done, we delete the large cache files and copy the
# trained models and stuff to a permanent directory, such that
# the models can be used in the future.
for root, dirs, files in os.walk(vw.dirname):
for file_ in filter(lambda x: x == 'cache.vw', files):
os.remove(os.path.join(root, file_))
vislab.util.makedirs(vislab.config['paths']['predict_stable'])
stable_dirname = os.path.join(
vislab.config['paths']['predict_stable'], vw.partial_dirname)
root_src_dir = vw.dirname
root_dst_dir = stable_dirname
for src_dir, dirs, files in os.walk(root_src_dir):
dst_dir = src_dir.replace(root_src_dir, root_dst_dir)
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
for file_ in files:
src_file = os.path.join(src_dir, file_)
dst_file = os.path.join(dst_dir, file_)
if os.path.exists(dst_file):
os.remove(dst_file)
shutil.move(src_file, dst_dir)
| bsd-2-clause |
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/ply/cpp.py | 16 | 33639 | # -----------------------------------------------------------------------------
# cpp.py
#
# Author: David Beazley (http://www.dabeaz.com)
# Copyright (C) 2007
# All rights reserved
#
# This module implements an ANSI-C style lexical preprocessor for PLY.
# -----------------------------------------------------------------------------
from __future__ import generators
import sys
# Some Python 3 compatibility shims
if sys.version_info.major < 3:
STRING_TYPES = (str, unicode)
else:
STRING_TYPES = str
xrange = range
# -----------------------------------------------------------------------------
# Default preprocessor lexer definitions. These tokens are enough to get
# a basic preprocessor working. Other modules may import these if they want
# -----------------------------------------------------------------------------
tokens = (
'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
)
literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
# Whitespace
def t_CPP_WS(t):
r'\s+'
t.lexer.lineno += t.value.count("\n")
return t
t_CPP_POUND = r'\#'
t_CPP_DPOUND = r'\#\#'
# Identifier
t_CPP_ID = r'[A-Za-z_][\w_]*'
# Integer literal
def CPP_INTEGER(t):
r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
return t
t_CPP_INTEGER = CPP_INTEGER
# Floating literal
t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
# String literal
def t_CPP_STRING(t):
r'\"([^\\\n]|(\\(.|\n)))*?\"'
t.lexer.lineno += t.value.count("\n")
return t
# Character constant 'c' or L'c'
def t_CPP_CHAR(t):
r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
t.lexer.lineno += t.value.count("\n")
return t
# Comment
def t_CPP_COMMENT1(t):
r'(/\*(.|\n)*?\*/)'
ncr = t.value.count("\n")
t.lexer.lineno += ncr
# replace with one space or a number of '\n'
t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
return t
# Line comment
def t_CPP_COMMENT2(t):
r'(//.*?(\n|$))'
# replace with '/n'
t.type = 'CPP_WS'; t.value = '\n'
return t
def t_error(t):
t.type = t.value[0]
t.value = t.value[0]
t.lexer.skip(1)
return t
import re
import copy
import time
import os.path
# -----------------------------------------------------------------------------
# trigraph()
#
# Given an input string, this function replaces all trigraph sequences.
# The following mapping is used:
#
# ??= #
# ??/ \
# ??' ^
# ??( [
# ??) ]
# ??! |
# ??< {
# ??> }
# ??- ~
# -----------------------------------------------------------------------------
_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
_trigraph_rep = {
'=':'#',
'/':'\\',
"'":'^',
'(':'[',
')':']',
'!':'|',
'<':'{',
'>':'}',
'-':'~'
}
def trigraph(input):
return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
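# Illustrative example: trigraph("??=define ARR(x) x??(0??)") returns
# "#define ARR(x) x[0]", since ??= maps to #, ??( to [ and ??) to ].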
# ------------------------------------------------------------------
# Macro object
#
# This object holds information about preprocessor macros
#
# .name - Macro name (string)
# .value - Macro value (a list of tokens)
# .arglist - List of argument names
# .variadic - Boolean indicating whether or not variadic macro
# .vararg - Name of the variadic parameter
#
# When a macro is created, the macro replacement token sequence is
# pre-scanned and used to create patch lists that are later used
# during macro expansion
# ------------------------------------------------------------------
class Macro(object):
def __init__(self,name,value,arglist=None,variadic=False):
self.name = name
self.value = value
self.arglist = arglist
self.variadic = variadic
if variadic:
self.vararg = arglist[-1]
self.source = None
# ------------------------------------------------------------------
# Preprocessor object
#
# Object representing a preprocessor. Contains macro definitions,
# include directories, and other information
# ------------------------------------------------------------------
class Preprocessor(object):
def __init__(self,lexer=None):
if lexer is None:
lexer = lex.lexer
self.lexer = lexer
self.macros = { }
self.path = []
self.temp_path = []
# Probe the lexer for selected tokens
self.lexprobe()
tm = time.localtime()
self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
self.parser = None
# -----------------------------------------------------------------------------
# tokenize()
#
# Utility function. Given a string of text, tokenize into a list of tokens
# -----------------------------------------------------------------------------
def tokenize(self,text):
tokens = []
self.lexer.input(text)
while True:
tok = self.lexer.token()
if not tok: break
tokens.append(tok)
return tokens
# ---------------------------------------------------------------------
# error()
#
# Report a preprocessor error/warning of some kind
# ----------------------------------------------------------------------
def error(self,file,line,msg):
print("%s:%d %s" % (file,line,msg))
# ----------------------------------------------------------------------
# lexprobe()
#
# This method probes the preprocessor lexer object to discover
# the token types of symbols that are important to the preprocessor.
# If this works right, the preprocessor will simply "work"
# with any suitable lexer regardless of how tokens have been named.
# ----------------------------------------------------------------------
def lexprobe(self):
# Determine the token type for identifiers
self.lexer.input("identifier")
tok = self.lexer.token()
if not tok or tok.value != "identifier":
print("Couldn't determine identifier type")
else:
self.t_ID = tok.type
# Determine the token type for integers
self.lexer.input("12345")
tok = self.lexer.token()
if not tok or int(tok.value) != 12345:
print("Couldn't determine integer type")
else:
self.t_INTEGER = tok.type
self.t_INTEGER_TYPE = type(tok.value)
# Determine the token type for strings enclosed in double quotes
self.lexer.input("\"filename\"")
tok = self.lexer.token()
if not tok or tok.value != "\"filename\"":
print("Couldn't determine string type")
else:
self.t_STRING = tok.type
# Determine the token type for whitespace--if any
self.lexer.input(" ")
tok = self.lexer.token()
if not tok or tok.value != " ":
self.t_SPACE = None
else:
self.t_SPACE = tok.type
# Determine the token type for newlines
self.lexer.input("\n")
tok = self.lexer.token()
if not tok or tok.value != "\n":
self.t_NEWLINE = None
print("Couldn't determine token for newlines")
else:
self.t_NEWLINE = tok.type
self.t_WS = (self.t_SPACE, self.t_NEWLINE)
# Check for other characters used by the preprocessor
chars = [ '<','>','#','##','\\','(',')',',','.']
for c in chars:
self.lexer.input(c)
tok = self.lexer.token()
if not tok or tok.value != c:
print("Unable to lex '%s' required for preprocessor" % c)
# ----------------------------------------------------------------------
# add_path()
#
# Adds a search path to the preprocessor.
# ----------------------------------------------------------------------
def add_path(self,path):
self.path.append(path)
# ----------------------------------------------------------------------
# group_lines()
#
# Given an input string, this function splits it into lines. Trailing whitespace
# is removed. Any line ending with \ is grouped with the next line. This
    # function forms the lowest level of the preprocessor---grouping text into
# a line-by-line format.
# ----------------------------------------------------------------------
def group_lines(self,input):
lex = self.lexer.clone()
lines = [x.rstrip() for x in input.splitlines()]
for i in xrange(len(lines)):
j = i+1
while lines[i].endswith('\\') and (j < len(lines)):
lines[i] = lines[i][:-1]+lines[j]
lines[j] = ""
j += 1
input = "\n".join(lines)
lex.input(input)
lex.lineno = 1
current_line = []
while True:
tok = lex.token()
if not tok:
break
current_line.append(tok)
if tok.type in self.t_WS and '\n' in tok.value:
yield current_line
current_line = []
if current_line:
yield current_line
# ----------------------------------------------------------------------
# tokenstrip()
#
# Remove leading/trailing whitespace tokens from a token list
# ----------------------------------------------------------------------
def tokenstrip(self,tokens):
i = 0
while i < len(tokens) and tokens[i].type in self.t_WS:
i += 1
del tokens[:i]
i = len(tokens)-1
while i >= 0 and tokens[i].type in self.t_WS:
i -= 1
del tokens[i+1:]
return tokens
# ----------------------------------------------------------------------
# collect_args()
#
# Collects comma separated arguments from a list of tokens. The arguments
# must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions)
# where tokencount is the number of tokens consumed, args is a list of arguments,
# and positions is a list of integers containing the starting index of each
# argument. Each argument is represented by a list of tokens.
#
# When collecting arguments, leading and trailing whitespace is removed
# from each argument.
#
# This function properly handles nested parenthesis and commas---these do not
# define new arguments.
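    #
    # Illustrative example: for the token stream of "(a, f(b, c), d)" this
    # returns a tokencount covering up to the closing ')', the argument token
    # lists [[a], [f ( b , c )], [d]], and the starting index of each argument.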
# ----------------------------------------------------------------------
def collect_args(self,tokenlist):
args = []
positions = []
current_arg = []
nesting = 1
tokenlen = len(tokenlist)
# Search for the opening '('.
i = 0
while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
i += 1
if (i < tokenlen) and (tokenlist[i].value == '('):
positions.append(i+1)
else:
self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
return 0, [], []
i += 1
while i < tokenlen:
t = tokenlist[i]
if t.value == '(':
current_arg.append(t)
nesting += 1
elif t.value == ')':
nesting -= 1
if nesting == 0:
if current_arg:
args.append(self.tokenstrip(current_arg))
positions.append(i)
return i+1,args,positions
current_arg.append(t)
elif t.value == ',' and nesting == 1:
args.append(self.tokenstrip(current_arg))
positions.append(i+1)
current_arg = []
else:
current_arg.append(t)
i += 1
# Missing end argument
self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
return 0, [],[]
# ----------------------------------------------------------------------
# macro_prescan()
#
# Examine the macro value (token sequence) and identify patch points
# This is used to speed up macro expansion later on---we'll know
# right away where to apply patches to the value to form the expansion
# ----------------------------------------------------------------------
def macro_prescan(self,macro):
macro.patch = [] # Standard macro arguments
macro.str_patch = [] # String conversion expansion
macro.var_comma_patch = [] # Variadic macro comma patch
i = 0
while i < len(macro.value):
if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
argnum = macro.arglist.index(macro.value[i].value)
# Conversion of argument to a string
if i > 0 and macro.value[i-1].value == '#':
macro.value[i] = copy.copy(macro.value[i])
macro.value[i].type = self.t_STRING
del macro.value[i-1]
macro.str_patch.append((argnum,i-1))
continue
# Concatenation
elif (i > 0 and macro.value[i-1].value == '##'):
macro.patch.append(('c',argnum,i-1))
del macro.value[i-1]
i -= 1
continue
elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
macro.patch.append(('c',argnum,i))
del macro.value[i + 1]
continue
# Standard expansion
else:
macro.patch.append(('e',argnum,i))
elif macro.value[i].value == '##':
if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
(macro.value[i+1].value == macro.vararg):
macro.var_comma_patch.append(i-1)
i += 1
macro.patch.sort(key=lambda x: x[2],reverse=True)
# ----------------------------------------------------------------------
# macro_expand_args()
#
# Given a Macro and list of arguments (each a token list), this method
# returns an expanded version of a macro. The return value is a token sequence
# representing the replacement macro tokens
# ----------------------------------------------------------------------
def macro_expand_args(self,macro,args):
# Make a copy of the macro token sequence
rep = [copy.copy(_x) for _x in macro.value]
# Make string expansion patches. These do not alter the length of the replacement sequence
str_expansion = {}
for argnum, i in macro.str_patch:
if argnum not in str_expansion:
str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
rep[i] = copy.copy(rep[i])
rep[i].value = str_expansion[argnum]
        # Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid of the comma that precedes it.
comma_patch = False
if macro.variadic and not args[-1]:
for i in macro.var_comma_patch:
rep[i] = None
comma_patch = True
# Make all other patches. The order of these matters. It is assumed that the patch list
# has been sorted in reverse order of patch location since replacements will cause the
# size of the replacement sequence to expand from the patch point.
expanded = { }
for ptype, argnum, i in macro.patch:
# Concatenation. Argument is left unexpanded
if ptype == 'c':
rep[i:i+1] = args[argnum]
# Normal expansion. Argument is macro expanded first
elif ptype == 'e':
if argnum not in expanded:
expanded[argnum] = self.expand_macros(args[argnum])
rep[i:i+1] = expanded[argnum]
# Get rid of removed comma if necessary
if comma_patch:
rep = [_i for _i in rep if _i]
return rep
# ----------------------------------------------------------------------
# expand_macros()
#
# Given a list of tokens, this function performs macro expansion.
# The expanded argument is a dictionary that contains macros already
# expanded. This is used to prevent infinite recursion.
# ----------------------------------------------------------------------
def expand_macros(self,tokens,expanded=None):
if expanded is None:
expanded = {}
i = 0
while i < len(tokens):
t = tokens[i]
if t.type == self.t_ID:
if t.value in self.macros and t.value not in expanded:
# Yes, we found a macro match
expanded[t.value] = True
m = self.macros[t.value]
if not m.arglist:
# A simple macro
ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
for e in ex:
e.lineno = t.lineno
tokens[i:i+1] = ex
i += len(ex)
else:
# A macro with arguments
j = i + 1
while j < len(tokens) and tokens[j].type in self.t_WS:
j += 1
if j < len(tokens) and tokens[j].value == '(':
tokcount,args,positions = self.collect_args(tokens[j:])
if not m.variadic and len(args) != len(m.arglist):
self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
i = j + tokcount
elif m.variadic and len(args) < len(m.arglist)-1:
if len(m.arglist) > 2:
self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
else:
self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
i = j + tokcount
else:
if m.variadic:
if len(args) == len(m.arglist)-1:
args.append([])
else:
args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
del args[len(m.arglist):]
# Get macro replacement text
rep = self.macro_expand_args(m,args)
rep = self.expand_macros(rep,expanded)
for r in rep:
r.lineno = t.lineno
tokens[i:j+tokcount] = rep
i += len(rep)
else:
                        # This is not a macro invocation. It is just a word
                        # which happens to match the name of a macro. Hence,
                        # go to the next token.
i += 1
del expanded[t.value]
continue
elif t.value == '__LINE__':
t.type = self.t_INTEGER
t.value = self.t_INTEGER_TYPE(t.lineno)
i += 1
return tokens
# ----------------------------------------------------------------------
# evalexpr()
#
# Evaluate an expression token sequence for the purposes of evaluating
# integral expressions.
# ----------------------------------------------------------------------
def evalexpr(self,tokens):
# tokens = tokenize(line)
# Search for defined macros
i = 0
while i < len(tokens):
if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
j = i + 1
needparen = False
result = "0L"
while j < len(tokens):
if tokens[j].type in self.t_WS:
j += 1
continue
elif tokens[j].type == self.t_ID:
if tokens[j].value in self.macros:
result = "1L"
else:
result = "0L"
if not needparen: break
elif tokens[j].value == '(':
needparen = True
elif tokens[j].value == ')':
break
else:
self.error(self.source,tokens[i].lineno,"Malformed defined()")
j += 1
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE(result)
del tokens[i+1:j+1]
i += 1
tokens = self.expand_macros(tokens)
for i,t in enumerate(tokens):
if t.type == self.t_ID:
tokens[i] = copy.copy(t)
tokens[i].type = self.t_INTEGER
tokens[i].value = self.t_INTEGER_TYPE("0L")
elif t.type == self.t_INTEGER:
tokens[i] = copy.copy(t)
# Strip off any trailing suffixes
tokens[i].value = str(tokens[i].value)
while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
tokens[i].value = tokens[i].value[:-1]
expr = "".join([str(x.value) for x in tokens])
expr = expr.replace("&&"," and ")
expr = expr.replace("||"," or ")
expr = expr.replace("!"," not ")
try:
result = eval(expr)
except Exception:
self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
result = 0
return result
# ----------------------------------------------------------------------
# parsegen()
#
    # Parse an input string.
# ----------------------------------------------------------------------
def parsegen(self,input,source=None):
# Replace trigraph sequences
t = trigraph(input)
lines = self.group_lines(t)
if not source:
source = ""
self.define("__FILE__ \"%s\"" % source)
self.source = source
chunk = []
enable = True
iftrigger = False
ifstack = []
for x in lines:
for i,tok in enumerate(x):
if tok.type not in self.t_WS: break
if tok.value == '#':
# Preprocessor directive
# insert necessary whitespace instead of eaten tokens
for tok in x:
if tok.type in self.t_WS and '\n' in tok.value:
chunk.append(tok)
dirtokens = self.tokenstrip(x[i+1:])
if dirtokens:
name = dirtokens[0].value
args = self.tokenstrip(dirtokens[1:])
else:
name = ""
args = []
if name == 'define':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.define(args)
elif name == 'include':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
oldfile = self.macros['__FILE__']
for tok in self.include(args):
yield tok
self.macros['__FILE__'] = oldfile
self.source = source
elif name == 'undef':
if enable:
for tok in self.expand_macros(chunk):
yield tok
chunk = []
self.undef(args)
elif name == 'ifdef':
ifstack.append((enable,iftrigger))
if enable:
if not args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'ifndef':
ifstack.append((enable,iftrigger))
if enable:
if args[0].value in self.macros:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'if':
ifstack.append((enable,iftrigger))
if enable:
result = self.evalexpr(args)
if not result:
enable = False
iftrigger = False
else:
iftrigger = True
elif name == 'elif':
if ifstack:
if ifstack[-1][0]: # We only pay attention if outer "if" allows this
if enable: # If already true, we flip enable False
enable = False
elif not iftrigger: # If False, but not triggered yet, we'll check expression
result = self.evalexpr(args)
if result:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
elif name == 'else':
if ifstack:
if ifstack[-1][0]:
if enable:
enable = False
elif not iftrigger:
enable = True
iftrigger = True
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
elif name == 'endif':
if ifstack:
enable,iftrigger = ifstack.pop()
else:
self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
else:
# Unknown preprocessor directive
pass
else:
# Normal text
if enable:
chunk.extend(x)
for tok in self.expand_macros(chunk):
yield tok
chunk = []
# ----------------------------------------------------------------------
# include()
#
# Implementation of file-inclusion
# ----------------------------------------------------------------------
def include(self,tokens):
# Try to extract the filename and then process an include file
if not tokens:
return
if tokens:
if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
tokens = self.expand_macros(tokens)
if tokens[0].value == '<':
# Include <...>
i = 1
while i < len(tokens):
if tokens[i].value == '>':
break
i += 1
else:
print("Malformed #include <...>")
return
filename = "".join([x.value for x in tokens[1:i]])
path = self.path + [""] + self.temp_path
elif tokens[0].type == self.t_STRING:
filename = tokens[0].value[1:-1]
path = self.temp_path + [""] + self.path
else:
print("Malformed #include statement")
return
for p in path:
iname = os.path.join(p,filename)
try:
data = open(iname,"r").read()
dname = os.path.dirname(iname)
if dname:
self.temp_path.insert(0,dname)
for tok in self.parsegen(data,filename):
yield tok
if dname:
del self.temp_path[0]
break
except IOError:
pass
else:
print("Couldn't find '%s'" % filename)
# ----------------------------------------------------------------------
# define()
#
# Define a new macro
# ----------------------------------------------------------------------
def define(self,tokens):
if isinstance(tokens,STRING_TYPES):
tokens = self.tokenize(tokens)
linetok = tokens
try:
name = linetok[0]
if len(linetok) > 1:
mtype = linetok[1]
else:
mtype = None
if not mtype:
m = Macro(name.value,[])
self.macros[name.value] = m
elif mtype.type in self.t_WS:
# A normal macro
m = Macro(name.value,self.tokenstrip(linetok[2:]))
self.macros[name.value] = m
elif mtype.value == '(':
# A macro with arguments
tokcount, args, positions = self.collect_args(linetok[1:])
variadic = False
for a in args:
if variadic:
print("No more arguments may follow a variadic argument")
break
astr = "".join([str(_i.value) for _i in a])
if astr == "...":
variadic = True
a[0].type = self.t_ID
a[0].value = '__VA_ARGS__'
variadic = True
del a[1:]
continue
elif astr[-3:] == "..." and a[0].type == self.t_ID:
variadic = True
del a[1:]
# If, for some reason, "." is part of the identifier, strip off the name for the purposes
# of macro expansion
if a[0].value[-3:] == '...':
a[0].value = a[0].value[:-3]
continue
if len(a) > 1 or a[0].type != self.t_ID:
print("Invalid macro argument")
break
else:
mvalue = self.tokenstrip(linetok[1+tokcount:])
i = 0
while i < len(mvalue):
if i+1 < len(mvalue):
if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
del mvalue[i]
continue
elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
del mvalue[i+1]
i += 1
m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
self.macro_prescan(m)
self.macros[name.value] = m
else:
print("Bad macro definition")
except LookupError:
print("Bad macro definition")
# ----------------------------------------------------------------------
# undef()
#
# Undefine a macro
# ----------------------------------------------------------------------
def undef(self,tokens):
id = tokens[0].value
try:
del self.macros[id]
except LookupError:
pass
# ----------------------------------------------------------------------
# parse()
#
# Parse input text.
# ----------------------------------------------------------------------
def parse(self,input,source=None,ignore={}):
self.ignore = ignore
self.parser = self.parsegen(input,source)
# ----------------------------------------------------------------------
# token()
#
# Method to return individual tokens
# ----------------------------------------------------------------------
def token(self):
try:
while True:
tok = next(self.parser)
if tok.type not in self.ignore: return tok
except StopIteration:
self.parser = None
return None
if __name__ == '__main__':
import ply.lex as lex
lexer = lex.lex()
# Run a preprocessor
import sys
f = open(sys.argv[1])
input = f.read()
p = Preprocessor(lexer)
p.parse(input,sys.argv[1])
while True:
tok = p.token()
if not tok: break
print(p.source, tok)
| isc |
dumoulinj/ers | ers_backend/video_processor/models.py | 1 | 25636 | import json
import logging
import cv2, os
from cv2 import cv
from django.conf import settings
from django.db import models
from math import exp
from django_enumfield import enum
from model_utils.managers import InheritanceManager
from PIL import Image, ImageStat
import time
import numpy
from dataset_manager.enums import ComputingStateType, FeatureFunctionType
from video_processor.enums import ShotBoundaryType, ShotBoundariesDetectionAlgorithmType
logger = logging.getLogger(__name__)
#-----------------------------------------------------------------------------------------------------------------------
# Shot detection models
#-----------------------------------------------------------------------------------------------------------------------
class ShotsDetection(models.Model):
"""
Model representing one particular shots detection execution, with its associated configuration. It contains
a list of algos that are used, and a list of resulting videos shots.
"""
date = models.DateTimeField(auto_now=True)
def _make_decision(self, results):
"""
TODO
"""
# TODO
is_shot = False
confidence = 0
threshold = results[0]["threshold"]
sumConfidence = 0.0
sumWeight = 0.0
for result in results:
# logger.info(result)
# # Temp: if one is True, so its True
# if result["result"] == True:
# is_shot = True
confidence = result["confidence"]
# print(confidence)
# break
sumConfidence = sumConfidence + (result["confidence"] * result["weight"])
sumWeight = sumWeight + result["weight"]
# logger.info(is_shot)
# logger.info(sumConfidence)
# logger.info(sumWeight)
# logger.info(threshold)
is_shot = (sumConfidence / sumWeight) >= threshold
# if(result["result"] or is_shot):
# print(threshold)
# print(sumConfidence)
# print(result["result"])
# print(is_shot)
return is_shot, sumConfidence
def detect(self, video):
"""
Process the video and detect shot boundaries. Store shots and shot boundaries objects in corresponding model
lists.
"""
MIN_SHOT_DELAY = 5
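        # Boundaries detected closer than MIN_SHOT_DELAY frames to the
        # previous one are ignored, so two cuts cannot be reported back to back.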
logger.info("Shot detection for %s", video.path)
# Create and prepare video_shots_results instance
video_shots_result = VideoShotsResult()
video_shots_result.shots_detection = self
video_shots_result.video = video
video_shots_result.save()
# Create video capture
capture = cv2.VideoCapture(video.path)
# Prepare variables
previous_frame = None
shot_nb = 0
crt_frame_nb = 0
previous_frame_nb = 0
last_shot_endframe_nb = -10000
start_frame_nb = 0
nb_total_frames = video.nb_frames
while True:
f, crt_frame = capture.read()
if crt_frame is None:
# End of video
break
results = list()
# Compare last 2 frames
            if previous_frame is not None:
#crt_frame_timestamp = capture.get(cv.CV_CAP_PROP_POS_MSEC)
crt_frame_nb = capture.get(cv.CV_CAP_PROP_POS_FRAMES)
#print crt_frame_nb
# cv2.imshow('prev', previous_frame)
# cv2.imshow('crt', crt_frame)
# cv2.waitKey(100)
# Two shots need to be separated at least by MIN_SHOT_DELAY frames
if crt_frame_nb - last_shot_endframe_nb > MIN_SHOT_DELAY:
# Apply all algos
#for algo in self.algos:
for algo in self.algos.all().select_subclasses():
result, confidence, diff = algo.is_boundary(previous_frame, crt_frame)
results.append({"algo": algo, "result": result, "confidence": confidence, "diff": diff,"weight": algo.weight,"threshold": algo.threshold})
# Make a decision by merging algos decisions
is_shot, confidence = self._make_decision(results)
# If there is a shot, we append the shot and the shot boundary entries to the corresponding lists
if is_shot:
end_frame_nb = previous_frame_nb
# Create shot and shot_boundary, and associate it with shots_detection
shot = Shot(video_shots_result=video_shots_result, shot_nb=shot_nb, start_frame=start_frame_nb, end_frame=end_frame_nb)
shot.save()
shot_boundary = ShotBoundary(video_shots_result=video_shots_result, frame=end_frame_nb)
shot_boundary.save()
shot_nb += 1
start_frame_nb = crt_frame_nb
last_shot_endframe_nb = previous_frame_nb
previous_frame = crt_frame
previous_frame_nb = crt_frame_nb
# Show advance
if settings.DEBUG:
if crt_frame_nb % 100 == 0:
percentage = int(crt_frame_nb * 100. / nb_total_frames)
print('Shot detection: {}/{} - {}%\r'.format(int(crt_frame_nb), int(nb_total_frames), percentage)),
print
# Add last shot
end_frame_nb = previous_frame_nb - 1
shot = Shot(video_shots_result=video_shots_result, shot_nb=shot_nb, start_frame=start_frame_nb, end_frame=end_frame_nb)
shot.save()
# Save models
video_shots_result.save()
self.save()
# Log info
logger.info("Number of detected shots: %s", shot_nb)
return shot_nb
def evaluate(self, video):
"""
Evaluate results of shot boundary detection against ground truth. Compute precision and recall and store in
model. Add also missed shot boundaries, allowing to visualize it later.
"""
logger.info("Evaluate video: %s", video.path)
margin = 10
        truth = video.shot_boundaries_ground_truth if isinstance(video.shot_boundaries_ground_truth,list) else eval(video.shot_boundaries_ground_truth)
        gt_shot_boundaries = truth
video_shots_result = self.video_shots_results.get(video=video)
shot_boundaries_iterator = iter(list(video_shots_result.shot_boundaries.all()))
nb_true_positives = 0
nb_false_positives = 0
nb_misses = 0
shot_boundary = shot_boundaries_iterator.next()
shot_boundaries_iterator_end = False
for gt_frame in gt_shot_boundaries:
if not shot_boundaries_iterator_end:
while abs(gt_frame - shot_boundary.frame) > margin and shot_boundary.frame - margin < gt_frame:
# False positive
shot_boundary.type = ShotBoundaryType.FALSE_POSITIVE
#new_shot_boundaries.append(shot_boundary)
shot_boundary.save()
nb_false_positives += 1
try:
shot_boundary = shot_boundaries_iterator.next()
except StopIteration:
shot_boundaries_iterator_end = True
break
if abs(gt_frame - shot_boundary.frame) <= margin and shot_boundary.type != ShotBoundaryType.MISS:
# True positive
shot_boundary.type = ShotBoundaryType.TRUE_POSITIVE
#new_shot_boundaries.append(shot_boundary)
shot_boundary.save()
nb_true_positives += 1
try:
shot_boundary = shot_boundaries_iterator.next()
except StopIteration:
shot_boundaries_iterator_end = True
else:
# Miss
miss_shot_boundary = ShotBoundary()
miss_shot_boundary.video_shots_result = video_shots_result
miss_shot_boundary.frame = gt_frame
miss_shot_boundary.type = ShotBoundaryType.MISS
miss_shot_boundary.save()
#new_shot_boundaries.append(miss_shot_boundary)
nb_misses += 1
else:
# Miss
miss_shot_boundary = ShotBoundary()
miss_shot_boundary.video_shots_result = video_shots_result
miss_shot_boundary.frame = gt_frame
miss_shot_boundary.type = ShotBoundaryType.MISS
miss_shot_boundary.save()
#new_shot_boundaries.append(miss_shot_boundary)
nb_misses += 1
# Precision: TruePos / (TruePos + FalsePos) ; Recall: TruePos / (TruePos + Misses)
precision = float(nb_true_positives) / float(nb_true_positives + nb_false_positives)
recall = float(nb_true_positives) / float(nb_true_positives + nb_misses)
#print "True positives: ", nb_true_positives, "False positives: ", nb_false_positives, "Misses: ", nb_misses
logger.debug("True positives: %s, False positives: %s, Misses: %s", nb_true_positives, nb_false_positives, nb_misses)
#print "Precision: ", precision, "Recall: ", recall
logger.debug("Precision: %s, Recall: %s", precision, recall)
#video_shots.shot_boundaries = new_shot_boundaries
video_shots_result.precision = precision
video_shots_result.recall = recall
video_shots_result.save()
def take_thumbnails(self,video):
# Create video capture
capture = cv2.VideoCapture(video.path)
run = True
# Process the video frame by frame
while run:
f, crt_frame = capture.read()
            if crt_frame is None:
# End of video
break
video_shots_result = self.video_shots_results.get(video=video)
shots = video_shots_result.shots.all()
for s in shots:
if s == shots[len(shots)-1]:
break
shot_path_out = os.path.join(video.dataset.converted_video_folder_path,"shots","video_"+str(video.id),'shot_result_'+str(self.id),'shot_'+str(s.id),)
try:
os.makedirs(shot_path_out)
except:
pass
start = s.start_frame
mid = s.start_frame+((s.end_frame-s.start_frame)/2)
end = s.end_frame
# print('%s %s %s %s ',s.id,start,mid,end)
if start > mid or start > end or mid > end:
print("errror in frame")
capture.set(cv.CV_CAP_PROP_POS_FRAMES, start)
t,img = capture.retrieve()
img = cv2.resize(img,(125,int(video.video_part.height/(float(video.video_part.width)/125))))
cv2.imwrite(os.path.join(shot_path_out,'start.png'),img)
capture.set(cv.CV_CAP_PROP_POS_FRAMES, mid)
t,img = capture.retrieve()
img = cv2.resize(img,(125,int(video.video_part.height/(float(video.video_part.width)/125))))
cv2.imwrite(os.path.join(shot_path_out,'mid.png'),img)
capture.set(cv.CV_CAP_PROP_POS_FRAMES, end)
t,img = capture.retrieve()
img = cv2.resize(img,(125,int(video.video_part.height/(float(video.video_part.width)/125))))
cv2.imwrite(os.path.join(shot_path_out,'end.png'),img)
run = False
print
# def setAlgos(self,algos):
# algo = None
# # Configure ECR algo
# for x in algos:
# print(x['value'])
# if(x['value'] == ShotAlgoType.ECR):
# algo = ECR()
# elif (x['value'] == ShotAlgoType.COLORHISTOGRAM):
# algo = ColorHistograms()
# algo.shots_detection = self
# algo.threshold = x['t']
# algo.save()
class VideoShotsResult(models.Model):
"""
Intermediary model to link a multimedia element to its list of shots.
"""
video = models.ForeignKey('dataset_manager.Video', related_name='video_shots_results')
shots_detection = models.ForeignKey(ShotsDetection, related_name='video_shots_results')
precision = models.FloatField(default=-1.)
recall = models.FloatField(default=-1.)
comment = models.CharField(max_length=255)
date = models.DateTimeField(auto_now_add=True,null=True)
def _configuration_as_string(self):
configuration_str = ""
algos = self.shots_detection.algos
for algo in algos.all():
name = algo.name
weight = algo.weight
threshold = algo.threshold
algo_string = name + "(threshold=" + str(threshold) + "; weight=" + str(weight) + ")"
if configuration_str != "":
configuration_str += ", "
configuration_str += algo_string
return configuration_str
configuration_as_string = property(_configuration_as_string)
def compute_shot_cut_density(self):
"""
For a given list of shots, compute the shot cut density, and return a dictionary containing the shot cut density
for each frame.
Returned dictionary looks like:
{'shot_cut_density': [0.28898, 0.2238, ..., 0.123345]}
        Formula: c(k) = exp((1 - n(k)) / r)
where n(k) is the number of frames of shot including the k-th video frame
and r is a constant determining the way the c(k) values are distributed on the scale between 0 and 1. See
        [Hanjalic 2005, Affective video content representation and modeling. IEEE Trans Multimedia 7(1):143-154] for the r value.
"""
r = 300.
shot_cut_density = list()
for shot in self.shots.all():
n_k = shot.end_frame - shot.start_frame
c_k = exp((1-n_k)/r)
for i in range(shot.start_frame, shot.end_frame+1):
shot_cut_density.append([i, c_k])
# if len(shot_cut_density["shot_cut_density"]) > 1:
# # Not sure why, but there is one entry too much, so pop!
# shot_cut_density["shot_cut_density"].pop()
return {FeatureFunctionType.VALUE: shot_cut_density}
class Shot(models.Model):
"""
Model representing a detected shot.
"""
video_shots_result = models.ForeignKey(VideoShotsResult, related_name='shots')
shot_nb = models.PositiveIntegerField()
start_frame = models.PositiveIntegerField()
end_frame = models.PositiveIntegerField()
def _frame_number(self):
return self.end_frame - self.start_frame
frame_number = property(_frame_number)
class Meta:
ordering = ['shot_nb']
class ShotBoundary(models.Model):
"""
Model representing a shot boundary.
"""
video_shots_result = models.ForeignKey(VideoShotsResult, related_name='shot_boundaries')
frame = models.PositiveIntegerField()
type = enum.EnumField(ShotBoundaryType, default=ShotBoundaryType.NONE)
class Meta:
ordering = ['frame']
#-----------------------------------------------------------------------------------------------------------------------
# Shot detection algorithms
#-----------------------------------------------------------------------------------------------------------------------
class ShotDetectionAlgo(models.Model):
"""
Parent model representing a shot detection algo.
"""
shots_detection = models.ForeignKey(ShotsDetection, related_name='algos')
weight = models.FloatField(default=1.)
threshold = models.FloatField()
objects = InheritanceManager()
type = enum.EnumField(ShotBoundariesDetectionAlgorithmType, default=ShotBoundariesDetectionAlgorithmType.NONE)
def _name(self):
return ShotBoundariesDetectionAlgorithmType.labels[self.type]
name = property(_name)
@staticmethod
def get_AlgoList():
list = []
for k, v in zip(ShotBoundariesDetectionAlgorithmType.labels.viewvalues(),ShotBoundariesDetectionAlgorithmType.labels.keys()):
d = {}
d['key'] = k
d['value'] = v
list.append(d)
return list
class ColorHistograms(ShotDetectionAlgo):
"""
Shot detection algo based on color histograms comparison.
"""
# def __init__(self):
# self.type = ShotBoundariesDetectionAlgorithmType.COLOR_HISTOGRAM
def _get_hist_comp(self, img1, img2):
"""
Compute the comparison of img1 and img2 histograms, using the CV_COMP_CORREL method.
"""
# Convert imgs to HSV
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2HSV)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2HSV)
# cv2.imshow('img1', img1)
# cv2.imshow('img2', img2)
# cv2.waitKey(1)
# Histogram for 1st image
hist_a1_hs = cv2.calcHist(img1, [0, 1], None, [30, 32], [0, 180, 0, 256])
cv2.normalize(hist_a1_hs, hist_a1_hs, 0, 255, cv2.NORM_MINMAX)
# Histogram for 2nd image
hist_a2_hs = cv2.calcHist(img2, [0, 1], None, [30, 32], [0, 180, 0, 256])
cv2.normalize(hist_a2_hs, hist_a2_hs, 0, 255, cv2.NORM_MINMAX)
# Compare
result = cv2.compareHist(hist_a1_hs, hist_a2_hs, cv.CV_COMP_CORREL)
return result
def is_boundary(self, img1, img2):
"""
Check if there seems to be a boundary between img1 and img2.
return True or False, confidence, difference with threshold
"""
confidence = self._get_hist_comp(img1, img2)
diff = confidence - self.threshold
if diff >= 0:
return False, confidence, diff
else:
return True, confidence, diff
class ECR(ShotDetectionAlgo):
"""
Shot detection algo based on the Edge Change Ratio technique, with a small tweak (see my SIGMAP'12 paper).
"""
def _get_edge_comp(self, img1, img2):
"""
"""
# Convert to grey
_img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
# Blur
_img1 = cv2.blur(_img1, (3,3))
_img2 = cv2.blur(_img2, (3,3))
# Canny edge detection with aperture=3 for Sobel
low_threshold = 75
sobel_aperture = 3
_img1 = cv2.Canny(_img1, low_threshold, 3*low_threshold, sobel_aperture)
_img2 = cv2.Canny(_img2, low_threshold, 3*low_threshold, sobel_aperture)
# Invert image to white background
i_img1 = numpy.invert(_img1)
i_img2 = numpy.invert(_img2)
# Count nb of edge pixels
s1 = cv2.countNonZero(_img1)
s2 = cv2.countNonZero(_img2)
# Dilate
element = cv2.getStructuringElement(cv2.MORPH_CROSS,(5,5))
d_img1 = cv2.dilate(_img1, element)
d_img2 = cv2.dilate(_img2, element)
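        # The dilation widens each edge so that edges which shifted by only a
        # few pixels between the two frames still overlap; this keeps small
        # camera or object motion from being counted as edge change.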
# Creating in/out edges pixels images
im_in = _img2 & numpy.invert(d_img1)
im_out = _img1 & numpy.invert(d_img2)
# Compute in/out ECR
try:
ecr_in = cv2.countNonZero(im_in) / float(s2)
except ZeroDivisionError:
# Image all black!
ecr_in = cv2.countNonZero(im_in)
try:
ecr_out = cv2.countNonZero(im_out) / float(s1)
except ZeroDivisionError:
# Image all black!
ecr_out = cv2.countNonZero(im_out)
ecr = max(ecr_in, ecr_out)
return ecr
def is_boundary(self, img1, img2):
"""
return True or False, confidence, difference with threshold
"""
confidence = self._get_edge_comp(img1, img2)
diff = self.threshold - confidence
# cv2.imshow('img1', img1)
# cv2.imshow('img2', img2)
# cv2.waitKey(50)
if diff >= 0:
return False, confidence, diff
else:
# cv2.namedWindow('im_in', cv2.CV_WINDOW_AUTOSIZE)
# cv2.namedWindow('im_out', cv2.CV_WINDOW_AUTOSIZE)
# False positive detection with transformation
# Convert to grey
_img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
_img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
transformation = cv2.estimateRigidTransform(_img1, _img2, fullAffine=True)
# cv2.imshow('im_in', _img1)
# cv2.imshow('im_out', _img2)
if transformation is None:
result = (True, 1, (1 - self.threshold))
else:
if abs(transformation[0][2]) > 200 or abs(transformation[1][2]) >= 200:
result = (True, confidence, diff)
else:
#False positive
result = (False, confidence, diff)
# if result[0]:
# print "SHOT"
# cv2.waitKey(5000)
return result
#-----------------------------------------------------------------------------------------------------------------------
# Divers
#-----------------------------------------------------------------------------------------------------------------------
# def prepare_video_shots(name="test_dataset", base_path="/Users/djo3l/Dev/Datasets/TestDataset"):
# dataset, created = Dataset.objects.get_or_create(name=name, base_path=base_path)
#
# # Prepare shot detection
# shots_detection = ShotsDetection()
# shots_detection.save()
#
# # Configure ECR algo
# ecr_algo = ECR()
# ecr_algo.shots_detection = shots_detection
# ecr_algo.threshold = 0.61
# ecr_algo.save()
#
#
# for video in dataset.videos.all():
# # Detect shot boundaries
# shots_detection.detect(video)
#
#
# def evaluate_video_shots(name="test_dataset", base_path="/Users/djo3l/Dev/Datasets/TestDataset"):
# dataset, created = Dataset.objects.get_or_create(name=name, base_path=base_path)
#
# shots_detection = ShotsDetection.objects.all().latest('date')
#
# for video in dataset.videos.all():
# # Evaluate the result
# shots_detection.evaluate(video)
#
#
#
# #-----------------------------------------------------------------------------------------------------------------------
# # Frame extraction models
# #-----------------------------------------------------------------------------------------------------------------------
#
# class VideoFrame(models.Model):
#
# """
# Model representing frames of a video
# """
# video_part = models.ForeignKey(VideoPart, related_name='frames')
# index = models.IntegerField() #in seconds
#
# def _path(self):
# return os.path.join(self.video_part.video.dataset.frame_path, self.video_part.video.name + "_" + str(self.index) + ".png")
# path = property(_path)
#
# def evaluate_brightness_for_image(self):
# #img = cv2.imread(self.path,1)
# img = Image.open(self.path)
# #logger.debug("The image is :" + str(img))
# brightness = 0
# # for rgbLine in img:
# # for rgb in rgbLine:
# # brightness += numpy.sqrt(0.299 * rgb[0] * rgb[0] + 0.587 * rgb[1] * rgb[1] + 0.114 * rgb[2] * rgb[2])
# imageGreyscale = img.convert('L')
# stat = ImageStat.Stat(imageGreyscale)
# brightness = stat.rms[0]
# return brightness
#
#
# #Saving video frames
# def prepare_video_frames(name="test_dataset", base_path="/Users/djo3l/Dev/Datasets/TestDataset"):
# """
# Extraction of one frame per second and saving images into a Frames folder for further use
# """
# logger.debug("Computing frames...")
# dataset = Dataset.objects.get(name=name, base_path=base_path)
# for video in dataset.videos.all():
# vidcap = cv2.VideoCapture(video.path)
# total_nb_of_frames = video.nb_frames
# logger.debug("FPS: " + str(video.video_part.fps))
# count = 0
# success = True
# while success:
# success, image = vidcap.read()
# if(success and ((vidcap.get(cv.CV_CAP_PROP_POS_FRAMES) % video.video_part.fps) == 0)):
# video_frame = VideoFrame()
# video_frame.video_part = video.video_part
# video_frame.index = count
# the_image = Image.fromarray(image, 'RGB')
# the_image.save(dataset.frame_path + "/" + video.name + "_" + str(count) + ".png")
# video_frame.save()
# count += 1
# # Show advance
# if settings.DEBUG:
# percentage = int(vidcap.get(cv.CV_CAP_PROP_POS_FRAMES) * 100. / total_nb_of_frames)
# print('Frame extraction: {}/{} - {}%\r'.format(int(vidcap.get(cv.CV_CAP_PROP_POS_FRAMES)), int(total_nb_of_frames), percentage)),
#
# print
# logger.info("Computing frames done: " + str(count) + "frames in total for video:" + video.name)
#prepare_video_shots("Shaefer2010", "/Users/djo3l/Dev/Datasets/Shaefer2010")
#prepare_video_shots("Manhob-hci", "/Users/djo3l/Dev/Datasets/manhob-hci")
#evaluate_video_shots()
#prepare_video_shots("test", "/Users/chassotce/Dev/ERS/Datasets/test")
#evaluate_video_shots()
#prepare_video_shots()
#evaluate_video_shots()
#prepare_video_shots("test", "/Users/chassotce/Dev/ERS/Datasets/test")
#prepare_video_frames("test", "/Users/chassotce/Dev/ERS/Datasets/test")
# prepare_video_shots("test", "/home/diana/ers/datasets/test")
# prepare_video_frames("test", "/home/diana/ers/datasets/test")
| mit |
monokoo/shadowsocks | shadowsocks/common.py | 1 | 13819 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import binascii
import re
import random
from shadowsocks import lru_cache
def compat_ord(s):
if type(s) == int:
return s
return _ord(s)
def compat_chr(d):
if bytes == str:
return _chr(d)
return bytes([d])
_ord = ord
_chr = chr
ord = compat_ord
chr = compat_chr
connect_log = logging.debug
def to_bytes(s):
if bytes != str:
if type(s) == str:
return s.encode('utf-8')
return s
def to_str(s):
if bytes != str:
if type(s) == bytes:
return s.decode('utf-8')
return s
def random_base64_str(randomlength = 8):
str = ''
chars = 'ABCDEF0123456789'
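    # Note: despite the function name, the characters drawn here are hex digits.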
length = len(chars) - 1
for i in range(randomlength):
str += chars[random.randint(0, length)]
return str
def int32(x):
if x > 0xFFFFFFFF or x < 0:
x &= 0xFFFFFFFF
if x > 0x7FFFFFFF:
x = int(0x100000000 - x)
if x < 0x80000000:
return -x
else:
return -2147483648
return x
def inet_ntop(family, ipstr):
if family == socket.AF_INET:
return to_bytes(socket.inet_ntoa(ipstr))
elif family == socket.AF_INET6:
import re
v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
for i, j in zip(ipstr[::2], ipstr[1::2]))
v6addr = re.sub('::+', '::', v6addr, count=1)
return to_bytes(v6addr)
def inet_pton(family, addr):
addr = to_str(addr)
if family == socket.AF_INET:
return socket.inet_aton(addr)
elif family == socket.AF_INET6:
if '.' in addr: # a v4 addr
v4addr = addr[addr.rindex(':') + 1:]
v4addr = socket.inet_aton(v4addr)
v4addr = ['%02X' % ord(x) for x in v4addr]
v4addr.insert(2, ':')
newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
return inet_pton(family, newaddr)
dbyts = [0] * 8 # 8 groups
grps = addr.split(':')
for i, v in enumerate(grps):
if v:
dbyts[i] = int(v, 16)
else:
for j, w in enumerate(grps[::-1]):
if w:
dbyts[7 - j] = int(w, 16)
else:
break
break
return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
else:
raise RuntimeError("What family?")
def is_ip(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
if type(address) != str:
address = address.decode('utf8')
inet_pton(family, address)
return family
except (TypeError, ValueError, OSError, IOError):
pass
return False
def match_regex(regex, text):
regex = re.compile(regex)
for item in regex.findall(text):
return True
return False
def patch_socket():
if not hasattr(socket, 'inet_pton'):
socket.inet_pton = inet_pton
if not hasattr(socket, 'inet_ntop'):
socket.inet_ntop = inet_ntop
patch_socket()
ADDRTYPE_IPV4 = 1
ADDRTYPE_IPV6 = 4
ADDRTYPE_HOST = 3
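# These values match the SOCKS5 ATYP field: 1 = IPv4, 3 = domain name, 4 = IPv6.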
def pack_addr(address):
address_str = to_str(address)
for family in (socket.AF_INET, socket.AF_INET6):
try:
r = socket.inet_pton(family, address_str)
if family == socket.AF_INET6:
return b'\x04' + r
else:
return b'\x01' + r
except (TypeError, ValueError, OSError, IOError):
pass
if len(address) > 255:
address = address[:255] # TODO
return b'\x03' + chr(len(address)) + address
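# Illustrative example: pack_addr(b'192.168.0.1') yields b'\x01\xc0\xa8\x00\x01'
# (type byte + packed IPv4), while pack_addr(b'example.com') yields
# b'\x03' + chr(11) + b'example.com' (type byte + length byte + hostname).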
def pre_parse_header(data):
if not data:
return None
datatype = ord(data[0])
if datatype == 0x80:
if len(data) <= 2:
return None
rand_data_size = ord(data[1])
if rand_data_size + 2 >= len(data):
logging.warn('header too short, maybe wrong password or '
'encryption method')
return None
data = data[rand_data_size + 2:]
elif datatype == 0x81:
data = data[1:]
elif datatype == 0x82:
if len(data) <= 3:
return None
rand_data_size = struct.unpack('>H', data[1:3])[0]
if rand_data_size + 3 >= len(data):
logging.warn('header too short, maybe wrong password or '
'encryption method')
return None
data = data[rand_data_size + 3:]
elif datatype == 0x88 or (~datatype & 0xff) == 0x88:
if len(data) <= 7 + 7:
return None
data_size = struct.unpack('>H', data[1:3])[0]
ogn_data = data
data = data[:data_size]
crc = binascii.crc32(data) & 0xffffffff
if crc != 0xffffffff:
            logging.warn('incorrect CRC32, maybe wrong password or '
'encryption method')
return None
start_pos = 3 + ord(data[3])
data = data[start_pos:-4]
if data_size < len(ogn_data):
data += ogn_data[data_size:]
return data
def parse_header(data):
addrtype = ord(data[0])
dest_addr = None
dest_port = None
header_length = 0
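    # The low bits of the first byte carry the address type (SOCKS5-style
    # ATYP), while the 0x8 bit is split off as a separate connect-type flag.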
connecttype = (addrtype & 0x8) and 1 or 0
addrtype &= ~0x8
if addrtype == ADDRTYPE_IPV4:
if len(data) >= 7:
dest_addr = socket.inet_ntoa(data[1:5])
dest_port = struct.unpack('>H', data[5:7])[0]
header_length = 7
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_HOST:
if len(data) > 2:
addrlen = ord(data[1])
if len(data) >= 4 + addrlen:
dest_addr = data[2:2 + addrlen]
dest_port = struct.unpack('>H', data[2 + addrlen:4 +
addrlen])[0]
header_length = 4 + addrlen
else:
logging.warn('header is too short')
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_IPV6:
if len(data) >= 19:
dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
dest_port = struct.unpack('>H', data[17:19])[0]
header_length = 19
else:
logging.warn('header is too short')
else:
logging.warn('unsupported addrtype %d, maybe wrong password or '
'encryption method' % addrtype)
if dest_addr is None:
return None
return connecttype, addrtype, to_bytes(dest_addr), dest_port, header_length
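# Matcher for a comma-separated list of IPv4/IPv6 networks in CIDR notation;
# supports `addr in network` membership tests.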
class IPNetwork(object):
ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}
def __init__(self, addrs):
self.addrs_str = addrs
self._network_list_v4 = []
self._network_list_v6 = []
if type(addrs) == str:
addrs = addrs.split(',')
list(map(self.add_network, addrs))
def add_network(self, addr):
        if addr == "":
return
block = addr.split('/')
addr_family = is_ip(block[0])
addr_len = IPNetwork.ADDRLENGTH[addr_family]
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(block[0]))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
ip = (hi << 64) | lo
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
        if len(block) == 1:
prefix_size = 0
while (ip & 1) == 0 and ip is not 0:
ip >>= 1
prefix_size += 1
logging.warn("You did't specify CIDR routing prefix size for %s, "
"implicit treated as %s/%d" % (addr, addr, addr_len))
elif block[1].isdigit() and int(block[1]) <= addr_len:
prefix_size = addr_len - int(block[1])
ip >>= prefix_size
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if addr_family is socket.AF_INET:
self._network_list_v4.append((ip, prefix_size))
else:
self._network_list_v6.append((ip, prefix_size))
def __contains__(self, addr):
addr_family = is_ip(addr)
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(addr))
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v4))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
ip = (hi << 64) | lo
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v6))
else:
return False
def __cmp__(self, other):
return cmp(self.addrs_str, other.addrs_str)
def __eq__(self, other):
return self.addrs_str == other.addrs_str
def __ne__(self, other):
return self.addrs_str != other.addrs_str
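# Parses a port specification such as '80,443,8000-9000' into a set of ints for
# membership tests; range endpoints are clamped to 0-65535.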
class PortRange(object):
def __init__(self, range_str):
self.range_str = to_str(range_str)
self.range = set()
range_str = to_str(range_str).split(',')
for item in range_str:
try:
int_range = item.split('-')
if len(int_range) == 1:
if item:
self.range.add(int(item))
elif len(int_range) == 2:
int_range[0] = int(int_range[0])
int_range[1] = int(int_range[1])
if int_range[0] < 0:
int_range[0] = 0
if int_range[1] > 65535:
int_range[1] = 65535
i = int_range[0]
while i <= int_range[1]:
self.range.add(i)
i += 1
except Exception as e:
logging.error(e)
def __contains__(self, val):
return val in self.range
def __cmp__(self, other):
return cmp(self.range_str, other.range_str)
def __eq__(self, other):
return self.range_str == other.range_str
def __ne__(self, other):
return self.range_str != other.range_str
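# Resolves a remote address asynchronously through dns_resolver, caching answers in a
# class-level LRU cache and invoking call_back(error, remote_addr, ip, params).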
class UDPAsyncDNSHandler(object):
dns_cache = lru_cache.LRUCache(timeout=1800)
def __init__(self, params):
self.params = params
self.remote_addr = None
self.call_back = None
def resolve(self, dns_resolver, remote_addr, call_back):
if remote_addr in UDPAsyncDNSHandler.dns_cache:
if call_back:
call_back("", remote_addr, UDPAsyncDNSHandler.dns_cache[remote_addr], self.params)
else:
self.call_back = call_back
self.remote_addr = remote_addr
dns_resolver.resolve(remote_addr[0], self._handle_dns_resolved)
UDPAsyncDNSHandler.dns_cache.sweep()
def _handle_dns_resolved(self, result, error):
if error:
logging.error("%s when resolve DNS" % (error,)) #drop
return self.call_back(error, self.remote_addr, None, self.params)
if result:
ip = result[1]
if ip:
return self.call_back("", self.remote_addr, ip, self.params)
logging.warning("can't resolve %s" % (self.remote_addr,))
return self.call_back("fail to resolve", self.remote_addr, None, self.params)
def test_inet_conv():
ipv4 = b'8.8.4.4'
b = inet_pton(socket.AF_INET, ipv4)
assert inet_ntop(socket.AF_INET, b) == ipv4
ipv6 = b'2404:6800:4005:805::1011'
b = inet_pton(socket.AF_INET6, ipv6)
assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
    assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
        (0, ADDRTYPE_HOST, b'www.google.com', 80, 18)
    assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
        (0, ADDRTYPE_IPV4, b'8.8.8.8', 53, 7)
    assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
                         b'\x00\x10\x11\x00\x50')) == \
        (0, ADDRTYPE_IPV6, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
assert pack_addr(b'2404:6800:4005:805::1011') == \
b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
assert '127.0.0.1' in ip_network
assert '127.0.1.1' not in ip_network
assert ':ff:ffff' in ip_network
assert '::ffff:1' not in ip_network
assert '::1' in ip_network
assert '::2' not in ip_network
assert '192.168.1.1' in ip_network
assert '192.168.1.2' not in ip_network
assert '192.0.2.1' in ip_network
assert '192.0.3.1' in ip_network # 192.0.2.0 is treated as 192.0.2.0/23
assert 'www.google.com' not in ip_network
if __name__ == '__main__':
test_inet_conv()
test_parse_header()
test_pack_header()
test_ip_network()
| apache-2.0 |
egenerat/gae-django | djangoappengine/management/commands/deploy.py | 10 | 2540 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# CHANGED: show warning if profiler is enabled, so you don't mistakenly upload
# with non-production settings. Also, added --nosyncdb switch.
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
import logging
import sys
import time
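# Runs App Engine's appcfg 'update' on the current directory and, unless --nosyncdb
# was given, follows up with a syncdb against the remote datastore.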
def run_appcfg(argv):
# We don't really want to use that one though, it just executes this one
from google.appengine.tools import appcfg
# Reset the logging level to WARN as appcfg will spew tons of logs on INFO
logging.getLogger().setLevel(logging.WARN)
new_args = argv[:]
new_args[1] = 'update'
new_args.append('.')
syncdb = True
if '--nosyncdb' in new_args:
syncdb = False
new_args.remove('--nosyncdb')
appcfg.main(new_args)
if syncdb:
print 'Running syncdb.'
# Wait a little bit for deployment to finish
for countdown in range(9, 0, -1):
sys.stdout.write('%s\r' % countdown)
time.sleep(1)
from django.db import connections
for connection in connections.all():
if hasattr(connection, 'setup_remote'):
connection.setup_remote()
call_command('syncdb', remote=True, interactive=True)
if getattr(settings, 'ENABLE_PROFILER', False):
print '--------------------------\n' \
'WARNING: PROFILER ENABLED!\n' \
'--------------------------'
class Command(BaseCommand):
"""Deploys the website to the production server.
Any additional arguments are passed directly to appcfg.py update
"""
help = 'Calls appcfg.py update for the current project.'
args = '[any appcfg.py options]'
def run_from_argv(self, argv):
if 'mediagenerator' in settings.INSTALLED_APPS:
call_command('generatemedia')
run_appcfg(argv)
| mit |
vmindru/ansible | test/units/modules/network/f5/test_bigip_pool_member.py | 25 | 8557 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_pool_member import ModuleParameters
from library.modules.bigip_pool_member import ApiParameters
from library.modules.bigip_pool_member import NodeApiParameters
from library.modules.bigip_pool_member import ModuleManager
from library.modules.bigip_pool_member import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_pool_member import ModuleParameters
from ansible.modules.network.f5.bigip_pool_member import ApiParameters
from ansible.modules.network.f5.bigip_pool_member import NodeApiParameters
from ansible.modules.network.f5.bigip_pool_member import ModuleManager
from ansible.modules.network.f5.bigip_pool_member import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
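# Load and cache a fixture from the fixtures directory; falls back to the raw text
# if the file is not valid JSON.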
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
pool='my-pool',
address='1.2.3.4',
fqdn='fqdn.foo.bar',
name='my-name',
port=2345,
connection_limit=100,
description='this is a description',
rate_limit=70,
ratio=20,
preserve_node=False,
priority_group=10,
state='present',
partition='Common',
fqdn_auto_populate=False,
reuse_nodes=False,
)
p = ModuleParameters(params=args)
assert p.name == 'my-name'
def test_api_parameters(self):
args = load_fixture('load_net_node_with_fqdn.json')
p = ApiParameters(params=args)
assert p.state == 'present'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_reuse_node_with_name(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
fqdn='foo.bar.com',
port=2345,
state='present',
partition='Common',
reuse_nodes=True,
provider=dict(
password='password',
server='localhost',
user='admin'
)
))
current_node = NodeApiParameters(params=load_fixture('load_net_node_with_fqdn.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of,
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
mm.read_current_node_from_device = Mock(return_value=current_node)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is True
assert results['fqdn'] == 'foo.bar.com'
assert results['state'] == 'present'
def test_create_reuse_node_with_ipv4_address(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
address='7.3.67.8',
port=2345,
state='present',
partition='Common',
reuse_nodes=True,
provider=dict(
password='password',
server='localhost',
user='admin'
)
))
current_node = NodeApiParameters(params=load_fixture('load_net_node_with_ipv4_address.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of,
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
mm.read_current_node_from_device = Mock(return_value=current_node)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is False
assert results['address'] == '7.3.67.8'
assert results['state'] == 'present'
def test_create_reuse_node_with_fqdn_auto_populate(self, *args):
# Configure the arguments that would be sent to the Ansible module
set_module_args(dict(
pool='my-pool',
fqdn='foo.bar.com',
port=2345,
state='present',
partition='Common',
reuse_nodes=True,
fqdn_auto_populate=False,
provider=dict(
password='password',
server='localhost',
user='admin'
)
))
current_node = NodeApiParameters(params=load_fixture('load_net_node_with_fqdn.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of,
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=False)
mm.create_on_device = Mock(return_value=True)
mm.read_current_node_from_device = Mock(return_value=current_node)
results = mm.exec_module()
assert results['changed'] is True
assert results['fqdn_auto_populate'] is True
assert results['fqdn'] == 'foo.bar.com'
assert results['state'] == 'present'
def test_create_aggregate_pool_members(self, *args):
set_module_args(dict(
pool='fake_pool',
aggregate=[
dict(
name='my-name',
host="1.1.1.1",
port=1234,
state='present',
partition='Common',
reuse_nodes=True,
fqdn_auto_populate=False,
),
dict(
name='my-name2',
fqdn='google.com',
port=2423,
state='present',
partition='Common',
fqdn_auto_populate=True,
reuse_nodes=True,
)
],
provider=dict(
password='password',
server='localhost',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode,
mutually_exclusive=self.spec.mutually_exclusive,
required_one_of=self.spec.required_one_of,
)
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 |
nirmeshk/oh-mainline | vendor/packages/sphinx/sphinx/writers/latex.py | 15 | 57880 | # -*- coding: utf-8 -*-
"""
sphinx.writers.latex
~~~~~~~~~~~~~~~~~~~~
Custom docutils writer for LaTeX.
Much of this code is adapted from Dave Kuhlman's "docpy" writer from his
docutils sandbox.
:copyright: Copyright 2007-2013 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import sys
from os import path
from docutils import nodes, writers
from docutils.writers.latex2e import Babel
from sphinx import addnodes
from sphinx import highlighting
from sphinx.errors import SphinxError
from sphinx.locale import admonitionlabels, _
from sphinx.util import split_into
from sphinx.util.osutil import ustrftime
from sphinx.util.pycompat import any
from sphinx.util.texescape import tex_escape_map, tex_replace_map
from sphinx.util.smartypants import educate_quotes_latex
HEADER = r'''%% Generated by Sphinx.
\def\sphinxdocclass{%(docclass)s}
\documentclass[%(papersize)s,%(pointsize)s%(classoptions)s]{%(wrapperclass)s}
%(inputenc)s
%(utf8extra)s
%(cmappkg)s
%(fontenc)s
%(babel)s
%(fontpkg)s
%(fncychap)s
%(longtable)s
\usepackage{sphinx}
\usepackage{multirow}
%(preamble)s
\title{%(title)s}
\date{%(date)s}
\release{%(release)s}
\author{%(author)s}
\newcommand{\sphinxlogo}{%(logo)s}
\renewcommand{\releasename}{%(releasename)s}
%(makeindex)s
'''
BEGIN_DOC = r'''
\begin{document}
%(shorthandoff)s
%(maketitle)s
%(tableofcontents)s
'''
FOOTER = r'''
\renewcommand{\indexname}{%(indexname)s}
%(printindex)s
\end{document}
'''
class collected_footnote(nodes.footnote):
"""Footnotes that are collected are assigned this class."""
class UnsupportedError(SphinxError):
category = 'Markup is unsupported in LaTeX'
class LaTeXWriter(writers.Writer):
supported = ('sphinxlatex',)
settings_spec = ('LaTeX writer options', '', (
('Document name', ['--docname'], {'default': ''}),
('Document class', ['--docclass'], {'default': 'manual'}),
('Author', ['--author'], {'default': ''}),
))
settings_defaults = {}
output = None
def __init__(self, builder):
writers.Writer.__init__(self)
self.builder = builder
def translate(self):
visitor = LaTeXTranslator(self.document, self.builder)
self.document.walkabout(visitor)
self.output = visitor.astext()
# Helper classes
class ExtBabel(Babel):
def get_shorthandoff(self):
shortlang = self.language.split('_')[0]
if shortlang in ('de', 'ngerman', 'sl', 'slovene', 'pt', 'portuges',
'es', 'spanish', 'nl', 'dutch', 'pl', 'polish', 'it',
'italian'):
return '\\shorthandoff{"}'
return ''
def uses_cyrillic(self):
shortlang = self.language.split('_')[0]
return shortlang in ('bg','bulgarian', 'kk','kazakh',
'mn','mongolian', 'ru','russian',
'uk','ukrainian')
# in latest trunk, the attribute is called Babel.language_codes and already
# includes Slovene
if hasattr(Babel, '_ISO639_TO_BABEL'):
Babel._ISO639_TO_BABEL['sl'] = 'slovene'
class Table(object):
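    """Per-table rendering state (column counts, colspec, caption, longtable/verbatim flags)."""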
def __init__(self):
self.col = 0
self.colcount = 0
self.colspec = None
self.rowcount = 0
self.had_head = False
self.has_problematic = False
self.has_verbatim = False
self.caption = None
self.longtable = False
class LaTeXTranslator(nodes.NodeVisitor):
sectionnames = ["part", "chapter", "section", "subsection",
"subsubsection", "paragraph", "subparagraph"]
ignore_missing_images = False
default_elements = {
'papersize': 'letterpaper',
'pointsize': '10pt',
'classoptions': '',
'extraclassoptions': '',
'inputenc': '\\usepackage[utf8]{inputenc}',
'utf8extra': '\\DeclareUnicodeCharacter{00A0}{\\nobreakspace}',
'cmappkg': '\\usepackage{cmap}',
'fontenc': '\\usepackage[T1]{fontenc}',
'babel': '\\usepackage{babel}',
'fontpkg': '\\usepackage{times}',
'fncychap': '\\usepackage[Bjarne]{fncychap}',
'longtable': '\\usepackage{longtable}',
'preamble': '',
'title': '',
'date': '',
'release': '',
'author': '',
'logo': '',
'releasename': 'Release',
'makeindex': '\\makeindex',
'shorthandoff': '',
'maketitle': '\\maketitle',
'tableofcontents': '\\tableofcontents',
'footer': '',
'printindex': '\\printindex',
'transition': '\n\n\\bigskip\\hrule{}\\bigskip\n\n',
}
# sphinx specific document classes
docclasses = ('howto', 'manual')
def __init__(self, document, builder):
nodes.NodeVisitor.__init__(self, document)
self.builder = builder
self.body = []
# sort out some elements
papersize = builder.config.latex_paper_size + 'paper'
if papersize == 'paper': # e.g. command line "-D latex_paper_size="
papersize = 'letterpaper'
self.elements = self.default_elements.copy()
self.elements.update({
'wrapperclass': self.format_docclass(document.settings.docclass),
'papersize': papersize,
'pointsize': builder.config.latex_font_size,
# if empty, the title is set to the first section title
'title': document.settings.title,
'release': builder.config.release,
'author': document.settings.author,
'releasename': _('Release'),
'preamble': builder.config.latex_preamble,
'indexname': _('Index'),
})
if document.settings.docclass == 'howto':
docclass = builder.config.latex_docclass.get('howto', 'article')
else:
docclass = builder.config.latex_docclass.get('manual', 'report')
self.elements['docclass'] = docclass
if builder.config.today:
self.elements['date'] = builder.config.today
else:
self.elements['date'] = ustrftime(builder.config.today_fmt
or _('%B %d, %Y'))
if builder.config.latex_logo:
self.elements['logo'] = '\\includegraphics{%s}\\par' % \
path.basename(builder.config.latex_logo)
if builder.config.language:
babel = ExtBabel(builder.config.language)
lang = babel.get_language()
if lang:
self.elements['classoptions'] += ',' + babel.get_language()
else:
self.builder.warn('no Babel option known for language %r' %
builder.config.language)
self.elements['shorthandoff'] = babel.get_shorthandoff()
self.elements['fncychap'] = '\\usepackage[Sonny]{fncychap}'
# Times fonts don't work with Cyrillic languages
if babel.uses_cyrillic():
self.elements['fontpkg'] = ''
# pTeX (Japanese TeX) for support
if builder.config.language == 'ja':
# use dvipdfmx as default class option in Japanese
self.elements['classoptions'] = ',dvipdfmx'
# disable babel which has not publishing quality in Japanese
self.elements['babel'] = ''
# disable fncychap in Japanese documents
self.elements['fncychap'] = ''
else:
self.elements['classoptions'] += ',english'
# allow the user to override them all
self.elements.update(builder.config.latex_elements)
if self.elements['extraclassoptions']:
self.elements['classoptions'] += ',' + \
self.elements['extraclassoptions']
self.highlighter = highlighting.PygmentsBridge('latex',
builder.config.pygments_style, builder.config.trim_doctest_flags)
self.context = []
self.descstack = []
self.bibitems = []
self.table = None
self.next_table_colspec = None
# stack of [language, linenothreshold] settings per file
# the first item here is the default and must not be changed
# the second item is the default for the master file and can be changed
# by .. highlight:: directive in the master file
self.hlsettingstack = 2 * [[builder.config.highlight_language,
sys.maxint]]
self.footnotestack = []
self.curfilestack = []
self.handled_abbrs = set()
if document.settings.docclass == 'howto':
self.top_sectionlevel = 2
else:
if builder.config.latex_use_parts:
self.top_sectionlevel = 0
else:
self.top_sectionlevel = 1
self.next_section_ids = set()
self.next_figure_ids = set()
self.next_table_ids = set()
# flags
self.verbatim = None
self.in_title = 0
self.in_production_list = 0
self.in_footnote = 0
self.in_caption = 0
self.first_document = 1
self.this_is_the_title = 1
self.literal_whitespace = 0
self.no_contractions = 0
self.compact_list = 0
self.first_param = 0
self.previous_spanning_row = 0
self.previous_spanning_column = 0
self.remember_multirow = {}
def format_docclass(self, docclass):
""" prepends prefix to sphinx document classes
"""
if docclass in self.docclasses:
docclass = 'sphinx' + docclass
return docclass
def astext(self):
return (HEADER % self.elements +
self.highlighter.get_stylesheet() +
u''.join(self.body) +
'\n' + self.elements['footer'] + '\n' +
self.generate_indices() +
FOOTER % self.elements)
def hypertarget(self, id, withdoc=True, anchor=True):
if withdoc:
id = self.curfilestack[-1] + ':' + id
return (anchor and '\\phantomsection' or '') + \
'\\label{%s}' % self.idescape(id)
def hyperlink(self, id):
return '{\\hyperref[%s]{' % self.idescape(id)
def hyperpageref(self, id):
return '\\autopageref*{%s}' % self.idescape(id)
def idescape(self, id):
return unicode(id).translate(tex_replace_map).\
encode('ascii', 'backslashreplace').decode('ascii').\
replace('\\', '_')
def generate_indices(self):
def generate(content, collapsed):
ret.append('\\begin{theindex}\n')
ret.append('\\def\\bigletter#1{{\\Large\\sffamily#1}'
'\\nopagebreak\\vspace{1mm}}\n')
for i, (letter, entries) in enumerate(content):
if i > 0:
ret.append('\\indexspace\n')
ret.append('\\bigletter{%s}\n' %
unicode(letter).translate(tex_escape_map))
for entry in entries:
if not entry[3]:
continue
ret.append('\\item {\\texttt{%s}}' % self.encode(entry[0]))
if entry[4]:
# add "extra" info
ret.append(' \\emph{(%s)}' % self.encode(entry[4]))
ret.append(', \\pageref{%s:%s}\n' %
(entry[2], self.idescape(entry[3])))
ret.append('\\end{theindex}\n')
ret = []
# latex_domain_indices can be False/True or a list of index names
indices_config = self.builder.config.latex_domain_indices
if indices_config:
for domain in self.builder.env.domains.itervalues():
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
# deprecated config value
if indexname == 'py-modindex' and \
not self.builder.config.latex_use_modindex:
continue
content, collapsed = indexcls(domain).generate(
self.builder.docnames)
if not content:
continue
ret.append(u'\\renewcommand{\\indexname}{%s}\n' %
indexcls.localname)
generate(content, collapsed)
return ''.join(ret)
def visit_document(self, node):
self.footnotestack.append(self.collect_footnotes(node))
self.curfilestack.append(node.get('docname', ''))
if self.first_document == 1:
# the first document is all the regular content ...
self.body.append(BEGIN_DOC % self.elements)
self.first_document = 0
elif self.first_document == 0:
# ... and all others are the appendices
self.body.append(u'\n\\appendix\n')
self.first_document = -1
if 'docname' in node:
self.body.append(self.hypertarget(':doc'))
# "- 1" because the level is increased before the title is visited
self.sectionlevel = self.top_sectionlevel - 1
def depart_document(self, node):
if self.bibitems:
widest_label = ""
for bi in self.bibitems:
if len(widest_label) < len(bi[0]):
widest_label = bi[0]
self.body.append(u'\n\\begin{thebibliography}{%s}\n' % widest_label)
for bi in self.bibitems:
target = self.hypertarget(bi[2] + ':' + bi[3],
withdoc=False)
self.body.append(u'\\bibitem[%s]{%s}{%s %s}\n' %
(bi[0], self.idescape(bi[0]), target, bi[1]))
self.body.append(u'\\end{thebibliography}\n')
self.bibitems = []
def visit_start_of_file(self, node):
# collect new footnotes
self.footnotestack.append(self.collect_footnotes(node))
# also add a document target
self.next_section_ids.add(':doc')
self.curfilestack.append(node['docname'])
# use default highlight settings for new file
self.hlsettingstack.append(self.hlsettingstack[0])
def collect_footnotes(self, node):
fnotes = {}
def footnotes_under(n):
if isinstance(n, nodes.footnote):
yield n
else:
for c in n.children:
if isinstance(c, addnodes.start_of_file):
continue
for k in footnotes_under(c):
yield k
for fn in footnotes_under(node):
num = fn.children[0].astext().strip()
fnotes[num] = [collected_footnote(*fn.children), False]
return fnotes
def depart_start_of_file(self, node):
self.footnotestack.pop()
self.curfilestack.pop()
self.hlsettingstack.pop()
def visit_highlightlang(self, node):
self.hlsettingstack[-1] = [node['lang'], node['linenothreshold']]
raise nodes.SkipNode
def visit_section(self, node):
if not self.this_is_the_title:
self.sectionlevel += 1
self.body.append('\n\n')
if node.get('ids'):
self.next_section_ids.update(node['ids'])
def depart_section(self, node):
self.sectionlevel = max(self.sectionlevel - 1,
self.top_sectionlevel - 1)
def visit_problematic(self, node):
self.body.append(r'{\color{red}\bfseries{}')
def depart_problematic(self, node):
self.body.append('}')
def visit_topic(self, node):
self.body.append('\\setbox0\\vbox{\n'
'\\begin{minipage}{0.95\\linewidth}\n')
def depart_topic(self, node):
self.body.append('\\end{minipage}}\n'
'\\begin{center}\\setlength{\\fboxsep}{5pt}'
'\\shadowbox{\\box0}\\end{center}\n')
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_glossary(self, node):
pass
def depart_glossary(self, node):
pass
def visit_productionlist(self, node):
self.body.append('\n\n\\begin{productionlist}\n')
self.in_production_list = 1
def depart_productionlist(self, node):
self.body.append('\\end{productionlist}\n\n')
self.in_production_list = 0
def visit_production(self, node):
if node['tokenname']:
tn = node['tokenname']
self.body.append(self.hypertarget('grammar-token-' + tn))
self.body.append('\\production{%s}{' % self.encode(tn))
else:
self.body.append('\\productioncont{')
def depart_production(self, node):
self.body.append('}\n')
def visit_transition(self, node):
self.body.append(self.elements['transition'])
def depart_transition(self, node):
pass
def visit_title(self, node):
parent = node.parent
if isinstance(parent, addnodes.seealso):
# the environment already handles this
raise nodes.SkipNode
elif self.this_is_the_title:
if len(node.children) != 1 and not isinstance(node.children[0],
nodes.Text):
self.builder.warn('document title is not a single Text node',
(self.curfilestack[-1], node.line))
if not self.elements['title']:
# text needs to be escaped since it is inserted into
# the output literally
self.elements['title'] = node.astext().translate(tex_escape_map)
self.this_is_the_title = 0
raise nodes.SkipNode
elif isinstance(parent, nodes.section):
try:
self.body.append(r'\%s{' % self.sectionnames[self.sectionlevel])
except IndexError:
# just use "subparagraph", it's not numbered anyway
self.body.append(r'\%s{' % self.sectionnames[-1])
self.context.append('}\n')
if self.next_section_ids:
for id in self.next_section_ids:
self.context[-1] += self.hypertarget(id, anchor=False)
self.next_section_ids.clear()
elif isinstance(parent, (nodes.topic, nodes.sidebar)):
self.body.append(r'\textbf{')
self.context.append('}\n\n\medskip\n\n')
elif isinstance(parent, nodes.Admonition):
self.body.append('{')
self.context.append('}\n')
elif isinstance(parent, nodes.table):
self.table.caption = self.encode(node.astext())
raise nodes.SkipNode
else:
self.builder.warn(
'encountered title node not in section, topic, table, '
'admonition or sidebar',
(self.curfilestack[-1], node.line or ''))
self.body.append('\\textbf{')
self.context.append('}\n')
self.in_title = 1
def depart_title(self, node):
self.in_title = 0
self.body.append(self.context.pop())
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append('~\\\\\n\\textbf{')
self.context.append('}\n\\smallskip\n')
else:
self.context.append('')
def depart_subtitle(self, node):
self.body.append(self.context.pop())
def visit_desc(self, node):
self.body.append('\n\n\\begin{fulllineitems}\n')
if self.table:
self.table.has_problematic = True
def depart_desc(self, node):
self.body.append('\n\\end{fulllineitems}\n\n')
def visit_desc_signature(self, node):
if node.parent['objtype'] != 'describe' and node['ids']:
hyper = self.hypertarget(node['ids'][0])
else:
hyper = ''
self.body.append(hyper)
for child in node:
if isinstance(child, addnodes.desc_parameterlist):
self.body.append(r'\pysiglinewithargsret{')
break
else:
self.body.append(r'\pysigline{')
def depart_desc_signature(self, node):
self.body.append('}')
def visit_desc_addname(self, node):
self.body.append(r'\code{')
self.literal_whitespace += 1
def depart_desc_addname(self, node):
self.body.append('}')
self.literal_whitespace -= 1
def visit_desc_type(self, node):
pass
def depart_desc_type(self, node):
pass
def visit_desc_returns(self, node):
self.body.append(r'{ $\rightarrow$ ')
def depart_desc_returns(self, node):
self.body.append(r'}')
def visit_desc_name(self, node):
self.body.append(r'\bfcode{')
self.no_contractions += 1
self.literal_whitespace += 1
def depart_desc_name(self, node):
self.body.append('}')
self.literal_whitespace -= 1
self.no_contractions -= 1
def visit_desc_parameterlist(self, node):
# close name, open parameterlist
self.body.append('}{')
self.first_param = 1
def depart_desc_parameterlist(self, node):
# close parameterlist, open return annotation
self.body.append('}{')
def visit_desc_parameter(self, node):
if not self.first_param:
self.body.append(', ')
else:
self.first_param = 0
if not node.hasattr('noemph'):
self.body.append(r'\emph{')
def depart_desc_parameter(self, node):
if not node.hasattr('noemph'):
self.body.append('}')
def visit_desc_optional(self, node):
self.body.append(r'\optional{')
def depart_desc_optional(self, node):
self.body.append('}')
def visit_desc_annotation(self, node):
self.body.append(r'\strong{')
def depart_desc_annotation(self, node):
self.body.append('}')
def visit_desc_content(self, node):
if node.children and not isinstance(node.children[0], nodes.paragraph):
# avoid empty desc environment which causes a formatting bug
self.body.append('~')
def depart_desc_content(self, node):
pass
def visit_refcount(self, node):
self.body.append("\\emph{")
def depart_refcount(self, node):
self.body.append("}\\\\")
def visit_seealso(self, node):
self.body.append(u'\n\n\\strong{%s:}\n\n' % admonitionlabels['seealso'])
def depart_seealso(self, node):
self.body.append("\n\n")
def visit_rubric(self, node):
if len(node.children) == 1 and node.children[0].astext() in \
('Footnotes', _('Footnotes')):
raise nodes.SkipNode
self.body.append('\\paragraph{')
self.context.append('}\n')
def depart_rubric(self, node):
self.body.append(self.context.pop())
def visit_footnote(self, node):
raise nodes.SkipNode
def visit_collected_footnote(self, node):
self.in_footnote += 1
self.body.append('\\footnote{')
def depart_collected_footnote(self, node):
self.body.append('}')
self.in_footnote -= 1
def visit_label(self, node):
if isinstance(node.parent, nodes.citation):
self.bibitems[-1][0] = node.astext()
self.bibitems[-1][2] = self.curfilestack[-1]
self.bibitems[-1][3] = node.parent['ids'][0]
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
self.next_table_colspec = node['spec']
raise nodes.SkipNode
def visit_table(self, node):
if self.table:
raise UnsupportedError(
'%s:%s: nested tables are not yet implemented.' %
(self.curfilestack[-1], node.line or ''))
self.table = Table()
self.table.longtable = 'longtable' in node['classes']
self.tablebody = []
self.tableheaders = []
# Redirect body output until table is finished.
self._body = self.body
self.body = self.tablebody
def depart_table(self, node):
if self.table.rowcount > 30:
self.table.longtable = True
self.body = self._body
if not self.table.longtable and self.table.caption is not None:
self.body.append(u'\n\n\\begin{threeparttable}\n'
u'\\capstart\\caption{%s}\n' % self.table.caption)
if self.table.longtable:
self.body.append('\n\\begin{longtable}')
endmacro = '\\end{longtable}\n\n'
elif self.table.has_verbatim:
self.body.append('\n\\begin{tabular}')
endmacro = '\\end{tabular}\n\n'
elif self.table.has_problematic and not self.table.colspec:
# if the user has given us tabularcolumns, accept them and use
# tabulary nevertheless
self.body.append('\n\\begin{tabular}')
endmacro = '\\end{tabular}\n\n'
else:
self.body.append('\n\\begin{tabulary}{\\linewidth}')
endmacro = '\\end{tabulary}\n\n'
if self.table.colspec:
self.body.append(self.table.colspec)
else:
if self.table.has_problematic:
colwidth = 0.95 / self.table.colcount
colspec = ('p{%.3f\\linewidth}|' % colwidth) * \
self.table.colcount
self.body.append('{|' + colspec + '}\n')
elif self.table.longtable:
self.body.append('{|' + ('l|' * self.table.colcount) + '}\n')
else:
self.body.append('{|' + ('L|' * self.table.colcount) + '}\n')
if self.table.longtable and self.table.caption is not None:
self.body.append(u'\\caption{%s} \\\\\n' % self.table.caption)
if self.table.caption is not None:
for id in self.next_table_ids:
self.body.append(self.hypertarget(id, anchor=False))
self.next_table_ids.clear()
if self.table.longtable:
self.body.append('\\hline\n')
self.body.extend(self.tableheaders)
self.body.append('\\endfirsthead\n\n')
self.body.append('\\multicolumn{%s}{c}%%\n' % self.table.colcount)
self.body.append(r'{{\textsf{\tablename\ \thetable{} -- %s}}} \\'
% _('continued from previous page'))
self.body.append('\n\\hline\n')
self.body.extend(self.tableheaders)
self.body.append('\\endhead\n\n')
self.body.append(ur'\hline \multicolumn{%s}{|r|}{{\textsf{%s}}} \\ \hline'
% (self.table.colcount,
_('Continued on next page')))
self.body.append('\n\\endfoot\n\n')
self.body.append('\\endlastfoot\n\n')
else:
self.body.append('\\hline\n')
self.body.extend(self.tableheaders)
self.body.extend(self.tablebody)
self.body.append(endmacro)
if not self.table.longtable and self.table.caption is not None:
self.body.append('\\end{threeparttable}\n\n')
self.table = None
self.tablebody = None
def visit_colspec(self, node):
self.table.colcount += 1
def depart_colspec(self, node):
pass
def visit_tgroup(self, node):
pass
def depart_tgroup(self, node):
pass
def visit_thead(self, node):
self.table.had_head = True
if self.next_table_colspec:
self.table.colspec = '{%s}\n' % self.next_table_colspec
self.next_table_colspec = None
# Redirect head output until header is finished. see visit_tbody.
self.body = self.tableheaders
def depart_thead(self, node):
self.body.append('\\hline')
def visit_tbody(self, node):
if not self.table.had_head:
self.visit_thead(node)
self.body = self.tablebody
def depart_tbody(self, node):
self.body.append('\\hline')
def visit_row(self, node):
self.table.col = 0
def depart_row(self, node):
if self.previous_spanning_row == 1:
self.previous_spanning_row = 0
self.body.append('\\\\\n')
self.table.rowcount += 1
def visit_entry(self, node):
if self.table.col > 0:
self.body.append(' & ')
elif self.remember_multirow.get(1, 0) > 1:
self.remember_multirow[1] -= 1
self.body.append(' & ')
self.table.col += 1
context = ''
if 'morerows' in node:
self.body.append(' \multirow{')
self.previous_spanning_row = 1
self.body.append(str(node.get('morerows') + 1))
self.body.append('}{*}{')
context += '}'
self.remember_multirow[self.table.col] = node.get('morerows') + 1
if 'morecols' in node:
self.body.append(' \multicolumn{')
self.body.append(str(node.get('morecols') + 1))
if self.table.col == 1:
self.body.append('}{|l|}{')
else:
self.body.append('}{l|}{')
context += '}'
if isinstance(node.parent.parent, nodes.thead):
self.body.append('\\textsf{\\relax ')
context += '}'
if self.remember_multirow.get(self.table.col + 1, 0) > 1:
self.remember_multirow[self.table.col + 1] -= 1
context += ' & '
self.context.append(context)
def depart_entry(self, node):
self.body.append(self.context.pop()) # header
def visit_acks(self, node):
# this is a list in the source, but should be rendered as a
# comma-separated list here
self.body.append('\n\n')
self.body.append(', '.join(n.astext()
for n in node.children[0].children) + '.')
self.body.append('\n\n')
raise nodes.SkipNode
def visit_bullet_list(self, node):
if not self.compact_list:
self.body.append('\\begin{itemize}\n' )
if self.table:
self.table.has_problematic = True
def depart_bullet_list(self, node):
if not self.compact_list:
self.body.append('\\end{itemize}\n' )
def visit_enumerated_list(self, node):
self.body.append('\\begin{enumerate}\n' )
if 'start' in node:
self.body.append('\\setcounter{enumi}{%d}\n' % (node['start'] - 1))
if self.table:
self.table.has_problematic = True
def depart_enumerated_list(self, node):
self.body.append('\\end{enumerate}\n' )
def visit_list_item(self, node):
# Append "{}" in case the next character is "[", which would break
# LaTeX's list environment (no numbering and the "[" is not printed).
self.body.append(r'\item {} ')
def depart_list_item(self, node):
self.body.append('\n')
def visit_definition_list(self, node):
self.body.append('\\begin{description}\n')
if self.table:
self.table.has_problematic = True
def depart_definition_list(self, node):
self.body.append('\\end{description}\n')
def visit_definition_list_item(self, node):
pass
def depart_definition_list_item(self, node):
pass
def visit_term(self, node):
ctx = '}] \\leavevmode'
if node.get('ids'):
ctx += self.hypertarget(node['ids'][0])
self.body.append('\\item[{')
self.context.append(ctx)
def depart_term(self, node):
self.body.append(self.context.pop())
def visit_termsep(self, node):
self.body.append(', ')
raise nodes.SkipNode
def visit_classifier(self, node):
self.body.append('{[}')
def depart_classifier(self, node):
self.body.append('{]}')
def visit_definition(self, node):
pass
def depart_definition(self, node):
self.body.append('\n')
def visit_field_list(self, node):
self.body.append('\\begin{quote}\\begin{description}\n')
if self.table:
self.table.has_problematic = True
def depart_field_list(self, node):
self.body.append('\\end{description}\\end{quote}\n')
def visit_field(self, node):
pass
def depart_field(self, node):
pass
visit_field_name = visit_term
depart_field_name = depart_term
visit_field_body = visit_definition
depart_field_body = depart_definition
def visit_paragraph(self, node):
self.body.append('\n')
def depart_paragraph(self, node):
self.body.append('\n')
def visit_centered(self, node):
self.body.append('\n\\begin{center}')
if self.table:
self.table.has_problematic = True
def depart_centered(self, node):
self.body.append('\n\\end{center}')
def visit_hlist(self, node):
# for now, we don't support a more compact list format
# don't add individual itemize environments, but one for all columns
self.compact_list += 1
self.body.append('\\begin{itemize}\\setlength{\\itemsep}{0pt}'
'\\setlength{\\parskip}{0pt}\n')
if self.table:
self.table.has_problematic = True
def depart_hlist(self, node):
self.compact_list -= 1
self.body.append('\\end{itemize}\n')
def visit_hlistcol(self, node):
pass
def depart_hlistcol(self, node):
pass
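    # Translate a reST length (e.g. '50%', '3cm') into a LaTeX length: pixel values are
    # left to LaTeX, and '%' becomes a fraction of \linewidth.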
def latex_image_length(self, width_str):
match = re.match('(\d*\.?\d*)\s*(\S*)', width_str)
if not match:
# fallback
return width_str
res = width_str
amount, unit = match.groups()[:2]
if not unit or unit == "px":
# pixels: let LaTeX alone
return None
elif unit == "%":
res = "%.3f\\linewidth" % (float(amount) / 100.0)
return res
def is_inline(self, node):
"""Check whether a node represents an inline element."""
return isinstance(node.parent, nodes.TextElement)
def visit_image(self, node):
attrs = node.attributes
pre = [] # in reverse order
post = []
include_graphics_options = []
is_inline = self.is_inline(node)
if 'scale' in attrs:
# Could also be done with ``scale`` option to
# ``\includegraphics``; doing it this way for consistency.
pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,))
post.append('}')
if 'width' in attrs:
w = self.latex_image_length(attrs['width'])
if w:
include_graphics_options.append('width=%s' % w)
if 'height' in attrs:
h = self.latex_image_length(attrs['height'])
if h:
include_graphics_options.append('height=%s' % h)
if 'align' in attrs:
align_prepost = {
# By default latex aligns the top of an image.
(1, 'top'): ('', ''),
(1, 'middle'): ('\\raisebox{-0.5\\height}{', '}'),
(1, 'bottom'): ('\\raisebox{-\\height}{', '}'),
(0, 'center'): ('{\\hfill', '\\hfill}'),
# These 2 don't exactly do the right thing. The image should
# be floated alongside the paragraph. See
# http://www.w3.org/TR/html4/struct/objects.html#adef-align-IMG
(0, 'left'): ('{', '\\hfill}'),
(0, 'right'): ('{\\hfill', '}'),}
try:
pre.append(align_prepost[is_inline, attrs['align']][0])
post.append(align_prepost[is_inline, attrs['align']][1])
except KeyError:
pass
if not is_inline:
pre.append('\n')
post.append('\n')
pre.reverse()
if node['uri'] in self.builder.images:
uri = self.builder.images[node['uri']]
else:
# missing image!
if self.ignore_missing_images:
return
uri = node['uri']
if uri.find('://') != -1:
# ignore remote images
return
self.body.extend(pre)
options = ''
if include_graphics_options:
options = '[%s]' % ','.join(include_graphics_options)
self.body.append('\\includegraphics%s{%s}' % (options, uri))
self.body.extend(post)
def depart_image(self, node):
pass
def visit_figure(self, node):
ids = ''
for id in self.next_figure_ids:
ids += self.hypertarget(id, anchor=False)
self.next_figure_ids.clear()
if 'width' in node and node.get('align', '') in ('left', 'right'):
self.body.append('\\begin{wrapfigure}{%s}{%s}\n\\centering' %
(node['align'] == 'right' and 'r' or 'l',
node['width']))
self.context.append(ids + '\\end{wrapfigure}\n')
else:
if (not 'align' in node.attributes or
node.attributes['align'] == 'center'):
# centering does not add vertical space like center.
align = '\n\\centering'
align_end = ''
else:
# TODO non vertical space for other alignments.
align = '\\begin{flush%s}' % node.attributes['align']
align_end = '\\end{flush%s}' % node.attributes['align']
self.body.append('\\begin{figure}[htbp]%s\n' % align)
if any(isinstance(child, nodes.caption) for child in node):
self.body.append('\\capstart\n')
self.context.append(ids + align_end + '\\end{figure}\n')
def depart_figure(self, node):
self.body.append(self.context.pop())
def visit_caption(self, node):
self.in_caption += 1
self.body.append('\\caption{')
def depart_caption(self, node):
self.body.append('}')
self.in_caption -= 1
def visit_legend(self, node):
self.body.append('{\\small ')
def depart_legend(self, node):
self.body.append('}')
def visit_admonition(self, node):
self.body.append('\n\\begin{notice}{note}')
def depart_admonition(self, node):
self.body.append('\\end{notice}\n')
def _make_visit_admonition(name):
def visit_admonition(self, node):
self.body.append(u'\n\\begin{notice}{%s}{%s:}' %
(name, admonitionlabels[name]))
return visit_admonition
def _depart_named_admonition(self, node):
self.body.append('\\end{notice}\n')
visit_attention = _make_visit_admonition('attention')
depart_attention = _depart_named_admonition
visit_caution = _make_visit_admonition('caution')
depart_caution = _depart_named_admonition
visit_danger = _make_visit_admonition('danger')
depart_danger = _depart_named_admonition
visit_error = _make_visit_admonition('error')
depart_error = _depart_named_admonition
visit_hint = _make_visit_admonition('hint')
depart_hint = _depart_named_admonition
visit_important = _make_visit_admonition('important')
depart_important = _depart_named_admonition
visit_note = _make_visit_admonition('note')
depart_note = _depart_named_admonition
visit_tip = _make_visit_admonition('tip')
depart_tip = _depart_named_admonition
visit_warning = _make_visit_admonition('warning')
depart_warning = _depart_named_admonition
def visit_versionmodified(self, node):
pass
def depart_versionmodified(self, node):
pass
def visit_target(self, node):
def add_target(id):
# indexing uses standard LaTeX index markup, so the targets
# will be generated differently
if id.startswith('index-'):
return
# do not generate \phantomsection in \section{}
anchor = not self.in_title
self.body.append(self.hypertarget(id, anchor=anchor))
# postpone the labels until after the sectioning command
parindex = node.parent.index(node)
try:
try:
next = node.parent[parindex+1]
except IndexError:
# last node in parent, look at next after parent
# (for section of equal level) if it exists
if node.parent.parent is not None:
next = node.parent.parent[
node.parent.parent.index(node.parent)]
else:
raise
if isinstance(next, nodes.section):
if node.get('refid'):
self.next_section_ids.add(node['refid'])
self.next_section_ids.update(node['ids'])
return
elif isinstance(next, nodes.figure):
# labels for figures go in the figure body, not before
if node.get('refid'):
self.next_figure_ids.add(node['refid'])
self.next_figure_ids.update(node['ids'])
return
elif isinstance(next, nodes.table):
# same for tables, but only if they have a caption
for n in node:
if isinstance(n, nodes.title):
if node.get('refid'):
self.next_table_ids.add(node['refid'])
self.next_table_ids.update(node['ids'])
return
except IndexError:
pass
if 'refuri' in node:
return
if node.get('refid'):
add_target(node['refid'])
for id in node['ids']:
add_target(id)
def depart_target(self, node):
pass
def visit_attribution(self, node):
self.body.append('\n\\begin{flushright}\n')
self.body.append('---')
def depart_attribution(self, node):
self.body.append('\n\\end{flushright}\n')
def visit_index(self, node, scre=re.compile(r';\s*')):
if not node.get('inline', True):
self.body.append('\n')
entries = node['entries']
for type, string, tid, ismain in entries:
m = ''
if ismain:
m = '|textbf'
try:
if type == 'single':
p = scre.sub('!', self.encode(string))
self.body.append(r'\index{%s%s}' % (p, m))
elif type == 'pair':
p1, p2 = map(self.encode, split_into(2, 'pair', string))
self.body.append(r'\index{%s!%s%s}\index{%s!%s%s}' %
(p1, p2, m, p2, p1, m))
elif type == 'triple':
p1, p2, p3 = map(self.encode,
split_into(3, 'triple', string))
self.body.append(
r'\index{%s!%s %s%s}\index{%s!%s, %s%s}'
r'\index{%s!%s %s%s}' %
(p1, p2, p3, m, p2, p3, p1, m, p3, p1, p2, m))
elif type == 'see':
p1, p2 = map(self.encode, split_into(2, 'see', string))
self.body.append(r'\index{%s|see{%s}}' % (p1, p2))
elif type == 'seealso':
p1, p2 = map(self.encode, split_into(2, 'seealso', string))
self.body.append(r'\index{%s|see{%s}}' % (p1, p2))
else:
self.builder.warn(
'unknown index entry type %s found' % type)
except ValueError, err:
self.builder.warn(str(err))
raise nodes.SkipNode
def visit_raw(self, node):
if 'latex' in node.get('format', '').split():
self.body.append(node.astext())
raise nodes.SkipNode
def visit_reference(self, node):
uri = node.get('refuri', '')
if not uri and node.get('refid'):
uri = '%' + self.curfilestack[-1] + '#' + node['refid']
if self.in_title or not uri:
self.context.append('')
elif uri.startswith('mailto:') or uri.startswith('http:') or \
uri.startswith('https:') or uri.startswith('ftp:'):
self.body.append('\\href{%s}{' % self.encode_uri(uri))
# if configured, put the URL after the link
show_urls = self.builder.config.latex_show_urls
if node.astext() != uri and show_urls and show_urls != 'no':
if uri.startswith('mailto:'):
uri = uri[7:]
if show_urls == 'footnote' and not \
(self.in_footnote or self.in_caption):
# obviously, footnotes in footnotes are not going to work
self.context.append(
r'}\footnote{%s}' % self.encode_uri(uri))
else: # all other true values (b/w compat)
self.context.append('} (%s)' % self.encode_uri(uri))
else:
self.context.append('}')
elif uri.startswith('#'):
# references to labels in the same document
id = self.curfilestack[-1] + ':' + uri[1:]
self.body.append(self.hyperlink(id))
if self.builder.config.latex_show_pagerefs and not \
self.in_production_list:
self.context.append('}} (%s)' % self.hyperpageref(id))
else:
self.context.append('}}')
elif uri.startswith('%'):
# references to documents or labels inside documents
hashindex = uri.find('#')
if hashindex == -1:
# reference to the document
id = uri[1:] + '::doc'
else:
# reference to a label
id = uri[1:].replace('#', ':')
self.body.append(self.hyperlink(id))
if len(node) and hasattr(node[0], 'attributes') and \
'std-term' in node[0].get('classes', []):
# don't add a pageref for glossary terms
self.context.append('}}')
else:
if self.builder.config.latex_show_pagerefs:
self.context.append('}} (%s)' % self.hyperpageref(id))
else:
self.context.append('}}')
else:
self.builder.warn('unusable reference target found: %s' % uri,
(self.curfilestack[-1], node.line))
self.context.append('')
def depart_reference(self, node):
self.body.append(self.context.pop())
def visit_download_reference(self, node):
pass
def depart_download_reference(self, node):
pass
def visit_pending_xref(self, node):
pass
def depart_pending_xref(self, node):
pass
def visit_emphasis(self, node):
self.body.append(r'\emph{')
def depart_emphasis(self, node):
self.body.append('}')
def visit_literal_emphasis(self, node):
self.body.append(r'\emph{\texttt{')
self.no_contractions += 1
def depart_literal_emphasis(self, node):
self.body.append('}}')
self.no_contractions -= 1
def visit_strong(self, node):
self.body.append(r'\textbf{')
def depart_strong(self, node):
self.body.append('}')
def visit_abbreviation(self, node):
abbr = node.astext()
self.body.append(r'\textsc{')
# spell out the explanation once
if node.hasattr('explanation') and abbr not in self.handled_abbrs:
self.context.append('} (%s)' % self.encode(node['explanation']))
self.handled_abbrs.add(abbr)
else:
self.context.append('}')
def depart_abbreviation(self, node):
self.body.append(self.context.pop())
def visit_title_reference(self, node):
self.body.append(r'\emph{')
def depart_title_reference(self, node):
self.body.append('}')
def visit_citation(self, node):
# TODO maybe use cite bibitems
# bibitem: [citelabel, citetext, docname, citeid]
self.bibitems.append(['', '', '', ''])
self.context.append(len(self.body))
def depart_citation(self, node):
size = self.context.pop()
text = ''.join(self.body[size:])
del self.body[size:]
self.bibitems[-1][1] = text
def visit_citation_reference(self, node):
# This is currently never encountered, since citation_reference nodes
# are already replaced by pending_xref nodes in the environment.
self.body.append('\\cite{%s}' % self.idescape(node.astext()))
raise nodes.SkipNode
def visit_literal(self, node):
self.no_contractions += 1
if self.in_title:
self.body.append(r'\texttt{')
else:
self.body.append(r'\code{')
def depart_literal(self, node):
self.no_contractions -= 1
self.body.append('}')
def visit_footnote_reference(self, node):
num = node.astext().strip()
try:
footnode, used = self.footnotestack[-1][num]
except (KeyError, IndexError):
raise nodes.SkipNode
# if a footnote has been inserted once, it shouldn't be repeated
# by the next reference
if used:
self.body.append('\\footnotemark[%s]' % num)
else:
if self.in_caption:
raise UnsupportedError('%s:%s: footnotes in float captions '
'are not supported by LaTeX' %
(self.curfilestack[-1], node.line))
footnode.walkabout(self)
self.footnotestack[-1][num][1] = True
raise nodes.SkipChildren
def depart_footnote_reference(self, node):
pass
def visit_literal_block(self, node):
if self.in_footnote:
raise UnsupportedError('%s:%s: literal blocks in footnotes are '
'not supported by LaTeX' %
(self.curfilestack[-1], node.line))
self.verbatim = ''
def depart_literal_block(self, node):
code = self.verbatim.rstrip('\n')
lang = self.hlsettingstack[-1][0]
linenos = code.count('\n') >= self.hlsettingstack[-1][1] - 1
highlight_args = node.get('highlight_args', {})
if 'language' in node:
# code-block directives
lang = node['language']
highlight_args['force'] = True
if 'linenos' in node:
linenos = node['linenos']
def warner(msg):
self.builder.warn(msg, (self.curfilestack[-1], node.line))
hlcode = self.highlighter.highlight_block(code, lang, warn=warner,
linenos=linenos, **highlight_args)
# workaround for Unicode issue
hlcode = hlcode.replace(u'€', u'@texteuro[]')
# must use original Verbatim environment and "tabular" environment
if self.table:
hlcode = hlcode.replace('\\begin{Verbatim}',
'\\begin{OriginalVerbatim}')
self.table.has_problematic = True
self.table.has_verbatim = True
# get consistent trailer
hlcode = hlcode.rstrip()[:-14] # strip \end{Verbatim}
hlcode = hlcode.rstrip() + '\n'
self.body.append('\n' + hlcode + '\\end{%sVerbatim}\n' %
(self.table and 'Original' or ''))
self.verbatim = None
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block
def visit_line(self, node):
self.body.append('\item[] ')
def depart_line(self, node):
self.body.append('\n')
def visit_line_block(self, node):
if isinstance(node.parent, nodes.line_block):
self.body.append('\\item[]\n'
'\\begin{DUlineblock}{\\DUlineblockindent}\n')
else:
self.body.append('\n\\begin{DUlineblock}{0em}\n')
if self.table:
self.table.has_problematic = True
def depart_line_block(self, node):
self.body.append('\\end{DUlineblock}\n')
def visit_block_quote(self, node):
# If the block quote contains a single object and that object
# is a list, then generate a list not a block quote.
# This lets us indent lists.
done = 0
if len(node.children) == 1:
child = node.children[0]
if isinstance(child, nodes.bullet_list) or \
isinstance(child, nodes.enumerated_list):
done = 1
if not done:
self.body.append('\\begin{quote}\n')
if self.table:
self.table.has_problematic = True
def depart_block_quote(self, node):
done = 0
if len(node.children) == 1:
child = node.children[0]
if isinstance(child, nodes.bullet_list) or \
isinstance(child, nodes.enumerated_list):
done = 1
if not done:
self.body.append('\\end{quote}\n')
# option node handling copied from docutils' latex writer
def visit_option(self, node):
if self.context[-1]:
# this is not the first option
self.body.append(', ')
def depart_option(self, node):
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node):
"""The delimiter betweeen an option and its argument."""
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
pass
def visit_option_group(self, node):
self.body.append('\\item [')
# flag for first option
self.context.append(0)
def depart_option_group(self, node):
self.context.pop() # the flag
self.body.append('] ')
def visit_option_list(self, node):
self.body.append('\\begin{optionlist}{3cm}\n')
if self.table:
self.table.has_problematic = True
def depart_option_list(self, node):
self.body.append('\\end{optionlist}\n')
def visit_option_list_item(self, node):
pass
def depart_option_list_item(self, node):
pass
def visit_option_string(self, node):
ostring = node.astext()
self.no_contractions += 1
self.body.append(self.encode(ostring))
self.no_contractions -= 1
raise nodes.SkipNode
def visit_description(self, node):
self.body.append(' ')
def depart_description(self, node):
pass
def visit_superscript(self, node):
self.body.append('$^{\\text{')
def depart_superscript(self, node):
self.body.append('}}$')
def visit_subscript(self, node):
self.body.append('$_{\\text{')
def depart_subscript(self, node):
self.body.append('}}$')
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_substitution_reference(self, node):
raise nodes.SkipNode
def visit_inline(self, node):
classes = node.get('classes', [])
self.body.append(r'\DUspan{%s}{' % ','.join(classes))
def depart_inline(self, node):
self.body.append('}')
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_compound(self, node):
pass
def depart_compound(self, node):
pass
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_decoration(self, node):
pass
def depart_decoration(self, node):
pass
# docutils-generated elements that we don't support
def visit_header(self, node):
raise nodes.SkipNode
def visit_footer(self, node):
raise nodes.SkipNode
def visit_docinfo(self, node):
raise nodes.SkipNode
# text handling
def encode(self, text):
text = unicode(text).translate(tex_escape_map)
if self.literal_whitespace:
# Insert a blank before the newline, to avoid
# ! LaTeX Error: There's no line here to end.
text = text.replace(u'\n', u'~\\\\\n').replace(u' ', u'~')
if self.no_contractions:
text = text.replace('--', u'-{-}')
text = text.replace("''", u"'{'}")
return text
def encode_uri(self, text):
# in \href, the tilde is allowed and must be represented literally
return self.encode(text).replace('\\textasciitilde{}', '~')
def visit_Text(self, node):
if self.verbatim is not None:
self.verbatim += node.astext()
else:
text = self.encode(node.astext())
if not self.no_contractions:
text = educate_quotes_latex(text)
self.body.append(text)
def depart_Text(self, node):
pass
def visit_comment(self, node):
raise nodes.SkipNode
def visit_meta(self, node):
# only valid for HTML
raise nodes.SkipNode
def visit_system_message(self, node):
pass
def depart_system_message(self, node):
self.body.append('\n')
def unknown_visit(self, node):
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
| agpl-3.0 |
gaddman/ansible | lib/ansible/modules/storage/netapp/netapp_e_asup.py | 21 | 11223 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_asup
short_description: NetApp E-Series manage auto-support settings
description:
- Allow the auto-support settings to be configured for an individual E-Series storage-system
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.eseries
options:
state:
description:
- Enable/disable the E-Series auto-support configuration.
- When this option is enabled, configuration, logs, and other support-related information will be relayed
to NetApp to help better support your system. No personally identifiable information, passwords, etc, will
be collected.
default: enabled
choices:
- enabled
- disabled
aliases:
- asup
- auto_support
- autosupport
active:
description:
- Enable active/proactive monitoring for ASUP. When a problem is detected by our monitoring systems, it's
possible that the bundle did not contain all of the required information at the time of the event.
Enabling this option allows NetApp support personnel to manually request transmission or re-transmission
              of support data in order to resolve the problem.
- Only applicable if I(state=enabled).
default: yes
type: bool
start:
description:
- A start hour may be specified in a range from 0 to 23 hours.
- ASUP bundles will be sent daily between the provided start and end time (UTC).
- I(start) must be less than I(end).
aliases:
- start_time
default: 0
end:
description:
- An end hour may be specified in a range from 1 to 24 hours.
- ASUP bundles will be sent daily between the provided start and end time (UTC).
- I(start) must be less than I(end).
aliases:
- end_time
default: 24
days:
description:
- A list of days of the week that ASUP bundles will be sent. A larger, weekly bundle will be sent on one
of the provided days.
choices:
- monday
- tuesday
- wednesday
- thursday
- friday
- saturday
- sunday
required: no
aliases:
- days_of_week
- schedule_days
verbose:
description:
- Provide the full ASUP configuration in the return.
default: no
required: no
type: bool
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- Enabling ASUP will allow our support teams to monitor the logs of the storage-system in order to proactively
respond to issues with the system. It is recommended that all ASUP-related options be enabled, but they may be
disabled if desired.
- This API is currently only supported with the Embedded Web Services API v2.0 and higher.
"""
EXAMPLES = """
- name: Enable ASUP and allow pro-active retrieval of bundles
netapp_e_asup:
state: enabled
active: yes
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
- name: Set the ASUP schedule to only send bundles from 12 AM CST to 3 AM CST.
netapp_e_asup:
start: 17
end: 20
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: string
sample: The settings have been updated.
asup:
description:
- True if ASUP is enabled.
returned: on success
sample: True
type: bool
active:
description:
- True if the active option has been enabled.
returned: on success
sample: True
type: bool
cfg:
description:
- Provide the full ASUP configuration.
returned: on success when I(verbose=true).
type: complex
contains:
asupEnabled:
description:
- True if ASUP has been enabled.
type: bool
onDemandEnabled:
description:
- True if ASUP active monitoring has been enabled.
type: bool
daysOfWeek:
description:
- The days of the week that ASUP bundles will be sent.
type: list
"""
import json
import logging
from pprint import pformat
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json",
}
class Asup(object):
DAYS_OPTIONS = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']
def __init__(self):
argument_spec = eseries_host_argument_spec()
argument_spec.update(dict(
state=dict(type='str', required=False, default='enabled', aliases=['asup', 'auto_support', 'autosupport'],
choices=['enabled', 'disabled']),
active=dict(type='bool', required=False, default=True, ),
days=dict(type='list', required=False, aliases=['schedule_days', 'days_of_week'],
choices=self.DAYS_OPTIONS),
start=dict(type='int', required=False, default=0, aliases=['start_time']),
end=dict(type='int', required=False, default=24, aliases=['end_time']),
verbose=dict(type='bool', required=False, default=False),
log_path=dict(type='str', required=False),
))
self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
args = self.module.params
self.asup = args['state'] == 'enabled'
self.active = args['active']
self.days = args['days']
self.start = args['start']
self.end = args['end']
self.verbose = args['verbose']
self.ssid = args['ssid']
self.url = args['api_url']
self.creds = dict(url_password=args['api_password'],
validate_certs=args['validate_certs'],
url_username=args['api_username'], )
self.check_mode = self.module.check_mode
log_path = args['log_path']
# logging setup
self._logger = logging.getLogger(self.__class__.__name__)
if log_path:
logging.basicConfig(
level=logging.DEBUG, filename=log_path, filemode='w',
format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')
if not self.url.endswith('/'):
self.url += '/'
if self.start >= self.end:
self.module.fail_json(msg="The value provided for the start time is invalid."
" It must be less than the end time.")
if self.start < 0 or self.start > 23:
self.module.fail_json(msg="The value provided for the start time is invalid. It must be between 0 and 23.")
else:
self.start = self.start * 60
if self.end < 1 or self.end > 24:
self.module.fail_json(msg="The value provided for the end time is invalid. It must be between 1 and 24.")
else:
self.end = min(self.end * 60, 1439)
if not self.days:
self.days = self.DAYS_OPTIONS
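        # Worked example of the conversion above (illustrative only): start=1 and
        # end=23 become dailyMinTime=60 and dailyMaxTime=1380 (minutes past midnight,
        # UTC), while end=24 is capped to 1439, presumably because the API expects a
        # value before midnight.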
def get_configuration(self):
try:
(rc, result) = request(self.url + 'device-asup', headers=HEADERS, **self.creds)
if not (result['asupCapable'] and result['onDemandCapable']):
self.module.fail_json(msg="ASUP is not supported on this device. Array Id [%s]." % (self.ssid))
return result
except Exception as err:
self.module.fail_json(msg="Failed to retrieve ASUP configuration! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
def update_configuration(self):
config = self.get_configuration()
update = False
body = dict()
if self.asup:
body = dict(asupEnabled=True)
if not config['asupEnabled']:
update = True
if (config['onDemandEnabled'] and config['remoteDiagsEnabled']) != self.active:
update = True
body.update(dict(onDemandEnabled=self.active,
remoteDiagsEnabled=self.active))
self.days.sort()
config['schedule']['daysOfWeek'].sort()
body['schedule'] = dict(daysOfWeek=self.days,
dailyMinTime=self.start,
dailyMaxTime=self.end,
weeklyMinTime=self.start,
weeklyMaxTime=self.end)
if self.days != config['schedule']['daysOfWeek']:
update = True
if self.start != config['schedule']['dailyMinTime'] or self.start != config['schedule']['weeklyMinTime']:
update = True
elif self.end != config['schedule']['dailyMaxTime'] or self.end != config['schedule']['weeklyMaxTime']:
update = True
elif config['asupEnabled']:
body = dict(asupEnabled=False)
update = True
self._logger.info(pformat(body))
if update and not self.check_mode:
try:
(rc, result) = request(self.url + 'device-asup', method='POST',
data=json.dumps(body), headers=HEADERS, **self.creds)
# This is going to catch cases like a connection failure
except Exception as err:
self.module.fail_json(msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
% (self.ssid, to_native(err)))
return update
def update(self):
update = self.update_configuration()
cfg = self.get_configuration()
if self.verbose:
self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'], cfg=cfg)
else:
self.module.exit_json(msg="The ASUP settings have been updated.", changed=update,
asup=cfg['asupEnabled'], active=cfg['onDemandEnabled'])
def __call__(self, *args, **kwargs):
self.update()
def main():
settings = Asup()
settings()
if __name__ == '__main__':
main()
| gpl-3.0 |
Denisolt/IEEE-NYIT-MA | local/lib/python2.7/site-packages/PIL/BdfFontFile.py | 17 | 3367 | #
# The Python Imaging Library
# $Id$
#
# bitmap distribution font (bdf) file parser
#
# history:
# 1996-05-16 fl created (as bdf2pil)
# 1997-08-25 fl converted to FontFile driver
# 2001-05-25 fl removed bogus __init__ call
# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev)
# 2003-04-22 fl more robustification (from Graham Dumpleton)
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from . import Image, FontFile
# --------------------------------------------------------------------
# parse X Bitmap Distribution Format (BDF)
# --------------------------------------------------------------------
bdf_slant = {
"R": "Roman",
"I": "Italic",
"O": "Oblique",
"RI": "Reverse Italic",
"RO": "Reverse Oblique",
"OT": "Other"
}
bdf_spacing = {
"P": "Proportional",
"M": "Monospaced",
"C": "Cell"
}
def bdf_char(f):
# skip to STARTCHAR
while True:
s = f.readline()
if not s:
return None
if s[:9] == b"STARTCHAR":
break
id = s[9:].strip().decode('ascii')
# load symbol properties
props = {}
while True:
s = f.readline()
if not s or s[:6] == b"BITMAP":
break
i = s.find(b" ")
props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii')
# load bitmap
bitmap = []
while True:
s = f.readline()
if not s or s[:7] == b"ENDCHAR":
break
bitmap.append(s[:-1])
bitmap = b"".join(bitmap)
[x, y, l, d] = [int(p) for p in props["BBX"].split()]
[dx, dy] = [int(p) for p in props["DWIDTH"].split()]
bbox = (dx, dy), (l, -d-y, x+l, -d), (0, 0, x, y)
try:
im = Image.frombytes("1", (x, y), bitmap, "hex", "1")
except ValueError:
# deal with zero-width characters
im = Image.new("1", (x, y))
return id, int(props["ENCODING"]), bbox, im
##
# Font file plugin for the X11 BDF format.
class BdfFontFile(FontFile.FontFile):
def __init__(self, fp):
FontFile.FontFile.__init__(self)
s = fp.readline()
if s[:13] != b"STARTFONT 2.1":
raise SyntaxError("not a valid BDF file")
props = {}
comments = []
while True:
s = fp.readline()
if not s or s[:13] == b"ENDPROPERTIES":
break
i = s.find(b" ")
props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii')
if s[:i] in [b"COMMENT", b"COPYRIGHT"]:
if s.find(b"LogicalFontDescription") < 0:
comments.append(s[i+1:-1].decode('ascii'))
# font = props["FONT"].split("-")
# font[4] = bdf_slant[font[4].upper()]
# font[11] = bdf_spacing[font[11].upper()]
# ascent = int(props["FONT_ASCENT"])
# descent = int(props["FONT_DESCENT"])
# fontname = ";".join(font[1:])
# print("#", fontname)
# for i in comments:
# print("#", i)
while True:
c = bdf_char(fp)
if not c:
break
id, ch, (xy, dst, src), im = c
if 0 <= ch < len(self.glyph):
self.glyph[ch] = xy, dst, src, im
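# A minimal usage sketch (not part of the original module; the font file name is
# hypothetical). It assumes FontFile.FontFile provides save(), as the bundled
# pilfont utility relies on, to write the .pil/.pbm pair next to the given name.
if __name__ == "__main__":
    with open("courier-10.bdf", "rb") as fp:
        font = BdfFontFile(fp)
    font.save("courier-10")  # expected to produce courier-10.pil and courier-10.pbm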
| gpl-3.0 |
zhuwenping/python-for-android | python3-alpha/python3-src/Lib/idlelib/configSectionNameDialog.py | 55 | 3743 | """
Dialog that allows user to specify a new config file section name.
Used to get new highlight theme and keybinding set names.
"""
from tkinter import *
import tkinter.messagebox as tkMessageBox
class GetCfgSectionNameDialog(Toplevel):
def __init__(self,parent,title,message,usedNames):
"""
message - string, informational message to display
usedNames - list, list of names already in use for validity check
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.resizable(height=FALSE,width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Cancel)
self.parent = parent
self.message=message
self.usedNames=usedNames
self.result=''
self.CreateWidgets()
self.withdraw() #hide while setting geometry
self.update_idletasks()
#needs to be done here so that the winfo_reqwidth is valid
self.messageInfo.config(width=self.frameMain.winfo_reqwidth())
self.geometry("+%d+%d" %
((parent.winfo_rootx()+((parent.winfo_width()/2)
-(self.winfo_reqwidth()/2)),
parent.winfo_rooty()+((parent.winfo_height()/2)
-(self.winfo_reqheight()/2)) )) ) #centre dialog over parent
self.deiconify() #geometry set, unhide
self.wait_window()
def CreateWidgets(self):
self.name=StringVar(self)
self.fontSize=StringVar(self)
self.frameMain = Frame(self,borderwidth=2,relief=SUNKEN)
self.frameMain.pack(side=TOP,expand=TRUE,fill=BOTH)
self.messageInfo=Message(self.frameMain,anchor=W,justify=LEFT,padx=5,pady=5,
text=self.message)#,aspect=200)
entryName=Entry(self.frameMain,textvariable=self.name,width=30)
entryName.focus_set()
self.messageInfo.pack(padx=5,pady=5)#,expand=TRUE,fill=BOTH)
entryName.pack(padx=5,pady=5)
frameButtons=Frame(self)
frameButtons.pack(side=BOTTOM,fill=X)
self.buttonOk = Button(frameButtons,text='Ok',
width=8,command=self.Ok)
self.buttonOk.grid(row=0,column=0,padx=5,pady=5)
self.buttonCancel = Button(frameButtons,text='Cancel',
width=8,command=self.Cancel)
self.buttonCancel.grid(row=0,column=1,padx=5,pady=5)
def NameOk(self):
#simple validity check for a sensible
#ConfigParser file section name
nameOk=1
name=self.name.get()
        name = name.strip()
if not name: #no name specified
tkMessageBox.showerror(title='Name Error',
message='No name specified.', parent=self)
nameOk=0
elif len(name)>30: #name too long
tkMessageBox.showerror(title='Name Error',
message='Name too long. It should be no more than '+
'30 characters.', parent=self)
nameOk=0
elif name in self.usedNames:
tkMessageBox.showerror(title='Name Error',
message='This name is already in use.', parent=self)
nameOk=0
return nameOk
def Ok(self, event=None):
if self.NameOk():
self.result=self.name.get().strip()
self.destroy()
def Cancel(self, event=None):
self.result=''
self.destroy()
if __name__ == '__main__':
#test the dialog
root=Tk()
def run():
        dlg=GetCfgSectionNameDialog(root,'Get Name',
            'The information here should need to be word wrapped. Test.',[])
print(dlg.result)
Button(root,text='Dialog',command=run).pack()
root.mainloop()
| apache-2.0 |
singleyoungtao/myblog-flask | tests/test_api.py | 1 | 10718 | #!/usr/bin/python
#-*-coding: utf-8 -*-
import unittest
import json
import re
from base64 import b64encode
from flask import url_for
from app import create_app, db
from app.models import User, Role, Post, Comment
class APITestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
Role.insert_roles()
self.client = self.app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def get_api_headers(self, username, password):
return {
'Authorization': 'Basic ' +b64encode(
(username + ':' + password).encode('utf-8')).decode('utf-8'),
'Accept': 'application/json',
'Content-Type': 'application/json'
}
def test_404(self):
response = self.client.get(
'/wrong/url',
headers=self.get_api_headers('email', 'password'))
self.assertTrue(response.status_code == 404)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['error'] == 'not found')
def test_no_auth(self):
response = self.client.get(url_for('api.get_posts'),
content_type='application/json')
self.assertTrue(response.status_code == 200)
def test_bad_auth(self):
        # add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=True,
role=r)
db.session.add(u)
db.session.commit()
        # authenticate with the wrong password
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 401)
def test_token_auth(self):
        # add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=True,
role=r)
db.session.add(u)
db.session.commit()
        # issue a request with a bad token
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('bad-token', ''))
self.assertTrue(response.status_code == 401)
        # get a token
response = self.client.get(
url_for('api.get_token'),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('token'))
token = json_response['token']
        # issue a request with the token
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers(token, ''))
self.assertTrue(response.status_code == 200)
def test_anonymous(self):
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('', ''))
self.assertTrue(response.status_code == 200)
def test_unconfirmed_account(self):
        # add an unconfirmed user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=False,
role=r)
db.session.add(u)
db.session.commit()
        # get the list of posts with the unconfirmed account
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 403)
def test_posts(self):
        # add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=True,
role=r)
db.session.add(u)
db.session.commit()
        # write an empty post
response = self.client.post(
url_for('api.new_post'),
headers=self.get_api_headers('[email protected]', 'cat'),
data=json.dumps({'body': ''}))
self.assertTrue(response.status_code == 400)
        # write a post
response = self.client.post(
url_for('api.new_post'),
headers=self.get_api_headers('[email protected]', 'cat'),
data=json.dumps({'body': 'body of the *blog* post'}))
self.assertTrue(response.status_code == 201)
url = response.headers.get('Location')
self.assertIsNotNone(url)
# get the new post
response = self.client.get(
url,
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['url'] == url)
self.assertTrue(json_response['body'] == 'body of the *blog* post')
self.assertTrue(json_response['body_html'] ==
'<p>body of the <em>blog</em> post</p>')
json_post = json_response
# get the post from the user
response = self.client.get(
url_for('api.get_user_posts', id=u.id),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 1)
self.assertTrue(json_response['posts'][0] == json_post)
# get the post from the user as a follower
response = self.client.get(
url_for('api.get_user_followed_posts', id=u.id),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 1)
self.assertTrue(json_response['posts'][0] == json_post)
# edit post
response = self.client.put(
url,
headers=self.get_api_headers('[email protected]', 'cat'),
data=json.dumps({'body': 'updated body'}))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['url'] == url)
self.assertTrue(json_response['body'] == 'updated body')
self.assertTrue(json_response['body_html'] == '<p>updated body</p>')
def test_users(self):
# add two users
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u1 = User(email='[email protected]', username='john',
password='cat', confirmed=True, role=r)
u2 = User(email='[email protected]', username='susan',
password='dog', confirmed=True, role=r)
db.session.add_all([u1, u2])
db.session.commit()
# get users
response = self.client.get(
url_for('api.get_user', id=u1.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['username'] == 'john')
response = self.client.get(
url_for('api.get_user', id=u2.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['username'] == 'susan')
def test_comments(self):
# add two users
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u1 = User(email='[email protected]', username='john',
password='cat', confirmed=True, role=r)
u2 = User(email='[email protected]', username='susan',
password='dog', confirmed=True, role=r)
db.session.add_all([u1, u2])
db.session.commit()
# add a post
post = Post(body='body of the post', author=u1)
db.session.add(post)
db.session.commit()
# write a comment
response = self.client.post(
url_for('api.new_post_comment', id=post.id),
headers=self.get_api_headers('[email protected]', 'dog'),
data=json.dumps({'body': 'Good [post](http://example.com)!'}))
self.assertTrue(response.status_code == 201)
json_response = json.loads(response.data.decode('utf-8'))
url = response.headers.get('Location')
self.assertIsNotNone(url)
self.assertTrue(json_response['body'] ==
'Good [post](http://example.com)!')
self.assertTrue(
re.sub('<.*?>', '', json_response['body_html']) == 'Good post!')
# get the new comment
response = self.client.get(
url,
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['url'] == url)
self.assertTrue(json_response['body'] ==
'Good [post](http://example.com)!')
# add another comment
comment = Comment(body='Thank you!', author=u1, post=post)
db.session.add(comment)
db.session.commit()
# get the two comments from the post
response = self.client.get(
url_for('api.get_post_comments', id=post.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 2)
# get all the comments
response = self.client.get(
url_for('api.get_comments', id=post.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('posts'))
self.assertTrue(json_response.get('count', 0) == 2)
| mit |
virtuald/exaile | plugins/ipconsole/ipython_view/ipython_view.py | 2 | 20044 | #!/usr/bin/python3
'''
Provides IPython console widget.
@author: Eitan Isaacson
@organization: IBM Corporation
@copyright: Copyright (c) 2007 IBM Corporation
@license: BSD
All rights reserved. This program and the accompanying materials are made
available under the terms of the BSD which accompanies this distribution, and
is available at U{http://www.opensource.org/licenses/bsd-license.php}
'''
# Taken from [1] (rev 64b6e0c, 2014-03-18) with slight modifications.
# [1] https://git.gnome.org/browse/accerciser/tree/plugins/ipython_view.py
import gi
from gi.repository import Gtk as gtk
from gi.repository import Gdk as gdk
from gi.repository import GLib
from gi.repository import Pango
from pkg_resources import parse_version
import re
import sys
import os
import subprocess
try:
from cStringIO import StringIO
except ImportError:
    from io import StringIO
from functools import reduce
try:
import IPython
except ImportError:
IPython = None
class IterableIPShell:
'''
Create an IPython instance. Does not start a blocking event loop,
instead allow single iterations. This allows embedding in GTK+
without blockage.
@ivar IP: IPython instance.
@type IP: IPython.iplib.InteractiveShell
@ivar iter_more: Indicates if the line executed was a complete command,
or we should wait for more.
@type iter_more: integer
@ivar history_level: The place in history where we currently are
when pressing up/down.
@type history_level: integer
  @ivar complete_sep: Separation delimiters for completion function.
@type complete_sep: _sre.SRE_Pattern
'''
def __init__(self,argv=[],user_ns=None,user_global_ns=None,
cin=None, cout=None,cerr=None, input_func=None):
'''
@param argv: Command line options for IPython
@type argv: list
@param user_ns: User namespace.
@type user_ns: dictionary
@param user_global_ns: User global namespace.
@type user_global_ns: dictionary.
@param cin: Console standard input.
@type cin: IO stream
@param cout: Console standard output.
@type cout: IO stream
@param cerr: Console standard error.
@type cerr: IO stream
@param input_func: Replacement for builtin raw_input()
@type input_func: function
'''
io = IPython.utils.io
if input_func:
if parse_version(IPython.release.version) >= parse_version("1.2.1"):
IPython.terminal.interactiveshell.raw_input_original = input_func
else:
IPython.frontend.terminal.interactiveshell.raw_input_original = input_func
if cin:
io.stdin = io.IOStream(cin)
if cout:
io.stdout = io.IOStream(cout)
if cerr:
io.stderr = io.IOStream(cerr)
    # This is to get rid of the blockage that occurs during
# IPython.Shell.InteractiveShell.user_setup()
io.raw_input = lambda x: None
os.environ['TERM'] = 'dumb'
excepthook = sys.excepthook
from IPython.config.loader import Config
cfg = Config()
cfg.InteractiveShell.colors = "Linux"
# InteractiveShell's __init__ overwrites io.stdout,io.stderr with
# sys.stdout, sys.stderr, this makes sure they are right
#
old_stdout, old_stderr = sys.stdout, sys.stderr
sys.stdout, sys.stderr = io.stdout.stream, io.stderr.stream
# InteractiveShell inherits from SingletonConfigurable, so use instance()
#
if parse_version(IPython.release.version) >= parse_version("1.2.1"):
self.IP = IPython.terminal.embed.InteractiveShellEmbed.instance(\
config=cfg, user_ns=user_ns)
else:
self.IP = IPython.frontend.terminal.embed.InteractiveShellEmbed.instance(\
config=cfg, user_ns=user_ns)
sys.stdout, sys.stderr = old_stdout, old_stderr
self.IP.system = lambda cmd: self.shell(self.IP.var_expand(cmd),
header='IPython system call: ')
# local_ns=user_ns)
#global_ns=user_global_ns)
#verbose=self.IP.rc.system_verbose)
self.IP.raw_input = input_func
sys.excepthook = excepthook
self.iter_more = 0
self.history_level = 0
self.complete_sep = re.compile('[\s\{\}\[\]\(\)]')
self.updateNamespace({'exit':lambda:None})
self.updateNamespace({'quit':lambda:None})
self.IP.readline_startup_hook(self.IP.pre_readline)
# Workaround for updating namespace with sys.modules
#
self.__update_namespace()
# help() is blocking, which hangs GTK+.
import pydoc
self.updateNamespace({'help': pydoc.doc})
def __update_namespace(self):
'''
Update self.IP namespace for autocompletion with sys.modules
'''
for k, v in list(sys.modules.items()):
if not '.' in k:
self.IP.user_ns.update({k:v})
def execute(self):
'''
Executes the current line provided by the shell object.
'''
self.history_level = 0
# this is needed because some functions in IPython use 'print' to print
# output (like 'who')
#
orig_stdout = sys.stdout
sys.stdout = IPython.utils.io.stdout
orig_stdin = sys.stdin
sys.stdin = IPython.utils.io.stdin;
self.prompt = self.generatePrompt(self.iter_more)
self.IP.hooks.pre_prompt_hook()
if self.iter_more:
try:
self.prompt = self.generatePrompt(True)
except:
self.IP.showtraceback()
if self.IP.autoindent:
self.IP.rl_do_indent = True
try:
line = self.IP.raw_input(self.prompt)
except KeyboardInterrupt:
self.IP.write('\nKeyboardInterrupt\n')
self.IP.input_splitter.reset()
except:
self.IP.showtraceback()
else:
self.IP.input_splitter.push(line)
self.iter_more = self.IP.input_splitter.push_accepts_more()
self.prompt = self.generatePrompt(self.iter_more)
if (self.IP.SyntaxTB.last_syntax_error and
self.IP.autoedit_syntax):
self.IP.edit_syntax_error()
if not self.iter_more:
if parse_version(IPython.release.version) >= parse_version("2.0.0-dev"):
source_raw = self.IP.input_splitter.raw_reset()
else:
source_raw = self.IP.input_splitter.source_raw_reset()[1]
self.IP.run_cell(source_raw, store_history=True)
self.IP.rl_do_indent = False
else:
# TODO: Auto-indent
#
self.IP.rl_do_indent = True
pass
sys.stdout = orig_stdout
sys.stdin = orig_stdin
def generatePrompt(self, is_continuation):
'''
Generate prompt depending on is_continuation value
@param is_continuation
@type is_continuation: boolean
@return: The prompt string representation
@rtype: string
'''
# Backwards compatibility with ipyton-0.11
#
ver = IPython.__version__
if '0.11' in ver:
prompt = self.IP.hooks.generate_prompt(is_continuation)
else:
if is_continuation:
prompt = self.IP.prompt_manager.render('in2')
else:
prompt = self.IP.prompt_manager.render('in')
return prompt
def historyBack(self):
'''
Provides one history command back.
@return: The command string.
@rtype: string
'''
self.history_level -= 1
if not self._getHistory():
self.history_level +=1
return self._getHistory()
def historyForward(self):
'''
Provides one history command forward.
@return: The command string.
@rtype: string
'''
if self.history_level < 0:
self.history_level += 1
return self._getHistory()
def _getHistory(self):
'''
    Gets the command string of the current history level.
@return: Historic command string.
@rtype: string
'''
try:
rv = self.IP.user_ns['In'][self.history_level].strip('\n')
except IndexError:
rv = ''
return rv
def updateNamespace(self, ns_dict):
'''
Add the current dictionary to the shell namespace.
@param ns_dict: A dictionary of symbol-values.
@type ns_dict: dictionary
'''
self.IP.user_ns.update(ns_dict)
def complete(self, line):
'''
    Returns an auto-completed line and/or possibilities for completion.
@param line: Given line so far.
@type line: string
    @return: Line completed as far as possible,
and possible further completions.
@rtype: tuple
'''
split_line = self.complete_sep.split(line)
if split_line[-1]:
possibilities = self.IP.complete(split_line[-1])
else:
completed = line
possibilities = ['', []]
if possibilities:
def _commonPrefix(str1, str2):
'''
Reduction function. returns common prefix of two given strings.
@param str1: First string.
@type str1: string
@param str2: Second string
@type str2: string
@return: Common prefix to both strings.
@rtype: string
'''
for i in range(len(str1)):
if not str2.startswith(str1[:i+1]):
return str1[:i]
return str1
if possibilities[1]:
common_prefix = reduce(_commonPrefix, possibilities[1]) or line[-1]
completed = line[:-len(split_line[-1])]+common_prefix
else:
completed = line
else:
completed = line
return completed, possibilities[1]
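  # Illustrative behaviour of complete() (assumed, the exact candidate list
  # depends on IPython's completer): with the current line "os.pa" and candidates
  # ['os.pardir', 'os.path'], the common-prefix reduction leaves the line as
  # "os.pa" and returns both candidates; with "os.pat" the only candidate is
  # 'os.path', so the line completes directly to it.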
def shell(self, cmd,verbose=0,debug=0,header=''):
'''
Replacement method to allow shell commands without them blocking.
@param cmd: Shell command to execute.
@type cmd: string
@param verbose: Verbosity
@type verbose: integer
@param debug: Debug level
@type debug: integer
@param header: Header to be printed before output
@type header: string
'''
stat = 0
if verbose or debug: print(header+cmd)
# flush stdout so we don't mangle python's buffering
if not debug:
      # os.popen4() no longer exists under Python 3; capture combined stdout/stderr
      print(subprocess.getoutput(cmd))
class ConsoleView(gtk.TextView):
'''
Specialized text view for console-like workflow.
@cvar ANSI_COLORS: Mapping of terminal colors to X11 names.
@type ANSI_COLORS: dictionary
@ivar text_buffer: Widget's text buffer.
@type text_buffer: gtk.TextBuffer
@ivar color_pat: Regex of terminal color pattern
@type color_pat: _sre.SRE_Pattern
@ivar mark: Scroll mark for automatic scrolling on input.
@type mark: gtk.TextMark
@ivar line_start: Start of command line mark.
@type line_start: gtk.TextMark
'''
ANSI_COLORS = {'0;30': 'Black', '0;31': 'Red',
'0;32': 'Green', '0;33': 'Brown',
'0;34': 'Blue', '0;35': 'Purple',
'0;36': 'Cyan', '0;37': 'LightGray',
'1;30': 'DarkGray', '1;31': 'DarkRed',
'1;32': 'SeaGreen', '1;33': 'Yellow',
'1;34': 'LightBlue', '1;35': 'MediumPurple',
'1;36': 'LightCyan', '1;37': 'White'}
def __init__(self):
'''
Initialize console view.
'''
gtk.TextView.__init__(self)
self.modify_font(Pango.FontDescription('Mono'))
self.set_cursor_visible(True)
self.text_buffer = self.get_buffer()
self.mark = self.text_buffer.create_mark('scroll_mark',
self.text_buffer.get_end_iter(),
False)
for code in self.ANSI_COLORS:
self.text_buffer.create_tag(code,
foreground=self.ANSI_COLORS[code],
weight=700)
self.text_buffer.create_tag('0')
self.text_buffer.create_tag('notouch', editable=False)
self.color_pat = re.compile('\x01?\x1b\[(.*?)m\x02?')
self.line_start = \
self.text_buffer.create_mark('line_start',
self.text_buffer.get_end_iter(), True)
self.connect('key-press-event', self.onKeyPress)
def write(self, text, editable=False):
GLib.idle_add(self._write, text, editable)
def _write(self, text, editable=False):
'''
Write given text to buffer.
@param text: Text to append.
@type text: string
@param editable: If true, added text is editable.
@type editable: boolean
'''
segments = self.color_pat.split(text)
segment = segments.pop(0)
start_mark = self.text_buffer.create_mark(None,
self.text_buffer.get_end_iter(),
True)
self.text_buffer.insert(self.text_buffer.get_end_iter(), segment)
if segments:
ansi_tags = self.color_pat.findall(text)
for tag in ansi_tags:
i = segments.index(tag)
self.text_buffer.insert_with_tags_by_name(self.text_buffer.get_end_iter(),
segments[i+1], tag)
segments.pop(i)
if not editable:
self.text_buffer.apply_tag_by_name('notouch',
self.text_buffer.get_iter_at_mark(start_mark),
self.text_buffer.get_end_iter())
self.text_buffer.delete_mark(start_mark)
self.scroll_mark_onscreen(self.mark)
def showPrompt(self, prompt):
GLib.idle_add(self._showPrompt, prompt)
def _showPrompt(self, prompt):
'''
Prints prompt at start of line.
@param prompt: Prompt to print.
@type prompt: string
'''
self._write(prompt)
self.text_buffer.move_mark(self.line_start,
self.text_buffer.get_end_iter())
def changeLine(self, text):
GLib.idle_add(self._changeLine, text)
def _changeLine(self, text):
'''
Replace currently entered command line with given text.
@param text: Text to use as replacement.
@type text: string
'''
iter = self.text_buffer.get_iter_at_mark(self.line_start)
iter.forward_to_line_end()
self.text_buffer.delete(self.text_buffer.get_iter_at_mark(self.line_start), iter)
self._write(text, True)
def getCurrentLine(self):
'''
Get text in current command line.
@return: Text of current command line.
@rtype: string
'''
rv = self.text_buffer.get_slice(
self.text_buffer.get_iter_at_mark(self.line_start),
self.text_buffer.get_end_iter(), False)
return rv
def showReturned(self, text):
GLib.idle_add(self._showReturned, text)
def _showReturned(self, text):
'''
Show returned text from last command and print new prompt.
@param text: Text to show.
@type text: string
'''
iter = self.text_buffer.get_iter_at_mark(self.line_start)
iter.forward_to_line_end()
self.text_buffer.apply_tag_by_name(
'notouch',
self.text_buffer.get_iter_at_mark(self.line_start),
iter)
self._write('\n'+text)
if text:
self._write('\n')
self._write('\n') # Add extra line, like normal IPython
self._showPrompt(self.prompt)
self.text_buffer.move_mark(self.line_start, self.text_buffer.get_end_iter())
self.text_buffer.place_cursor(self.text_buffer.get_end_iter())
if self.IP.rl_do_indent:
indentation = self.IP.input_splitter.indent_spaces * ' '
self.text_buffer.insert_at_cursor(indentation)
def onKeyPress(self, widget, event):
'''
Key press callback used for correcting behavior for console-like
    interfaces. For example 'home' should go to prompt, not to beginning of
    line.
    @param widget: Widget that key press occurred in.
@type widget: gtk.Widget
@param event: Event object
@type event: gtk.gdk.Event
@return: Return True if event should not trickle.
@rtype: boolean
'''
insert_mark = self.text_buffer.get_insert()
insert_iter = self.text_buffer.get_iter_at_mark(insert_mark)
selection_mark = self.text_buffer.get_selection_bound()
selection_iter = self.text_buffer.get_iter_at_mark(selection_mark)
start_iter = self.text_buffer.get_iter_at_mark(self.line_start)
if event.keyval == gdk.KEY_Home:
if event.state & gdk.ModifierType.CONTROL_MASK or \
event.state & gdk.ModifierType.MOD1_MASK:
pass
elif event.state & gdk.ModifierType.SHIFT_MASK:
self.text_buffer.move_mark(insert_mark, start_iter)
return True
else:
self.text_buffer.place_cursor(start_iter)
return True
elif event.keyval == gdk.KEY_Left:
insert_iter.backward_cursor_position()
if not insert_iter.editable(True):
return True
elif not event.string:
pass
elif start_iter.compare(insert_iter) <= 0 and \
start_iter.compare(selection_iter) <= 0:
pass
elif start_iter.compare(insert_iter) > 0 and \
start_iter.compare(selection_iter) > 0:
self.text_buffer.place_cursor(start_iter)
elif insert_iter.compare(selection_iter) < 0:
self.text_buffer.move_mark(insert_mark, start_iter)
elif insert_iter.compare(selection_iter) > 0:
self.text_buffer.move_mark(selection_mark, start_iter)
return self.onKeyPressExtend(event)
def onKeyPressExtend(self, event):
'''
For some reason we can't extend onKeyPress directly (bug #500900).
'''
pass
class IPythonView(ConsoleView, IterableIPShell):
'''
Sub-class of both modified IPython shell and L{ConsoleView} this makes
a GTK+ IPython console.
'''
def __init__(self):
'''
Initialize. Redirect I/O to console.
'''
ConsoleView.__init__(self)
self.cout = StringIO()
IterableIPShell.__init__(self, cout=self.cout, cerr=self.cout,
input_func=self.raw_input)
# self.connect('key_press_event', self.keyPress)
self.interrupt = False
self.execute()
self.prompt = self.generatePrompt(False)
self.cout.truncate(0)
self.showPrompt(self.prompt)
def raw_input(self, prompt=''):
'''
    Custom raw_input() replacement. Gets the current line from the console buffer.
    @param prompt: Prompt to print. Here for compatibility as a replacement.
@type prompt: string
@return: The current command line text.
@rtype: string
'''
if self.interrupt:
self.interrupt = False
raise KeyboardInterrupt
return self.getCurrentLine()
def onKeyPressExtend(self, event):
'''
Key press callback with plenty of shell goodness, like history,
autocompletions, etc.
    @param widget: Widget that key press occurred in.
@type widget: gtk.Widget
@param event: Event object.
@type event: gtk.gdk.Event
@return: True if event should not trickle.
@rtype: boolean
'''
if event.state & gdk.ModifierType.CONTROL_MASK and event.keyval == 99:
self.interrupt = True
self._processLine()
return True
elif event.keyval == gdk.KEY_Return:
self._processLine()
return True
elif event.keyval == gdk.KEY_Up:
self.changeLine(self.historyBack())
return True
elif event.keyval == gdk.KEY_Down:
self.changeLine(self.historyForward())
return True
elif event.keyval == gdk.KEY_Tab:
if not self.getCurrentLine().strip():
return False
completed, possibilities = self.complete(self.getCurrentLine())
if len(possibilities) > 1:
slice = self.getCurrentLine()
self.write('\n')
for symbol in possibilities:
self.write(symbol+'\n')
self.showPrompt(self.prompt)
self.changeLine(completed or slice)
return True
def _processLine(self):
'''
Process current command line.
'''
self.history_pos = 0
self.execute()
rv = self.cout.getvalue()
if rv: rv = rv.strip('\n')
self.showReturned(rv)
self.cout.truncate(0)
self.cout.seek(0)
if __name__ == "__main__":
window = gtk.Window()
window.set_default_size(640, 320)
window.connect('delete-event', lambda x, y: gtk.main_quit())
window.add(IPythonView())
window.show_all()
gtk.main()
| gpl-2.0 |
valtandor/easybuild-easyblocks | easybuild/easyblocks/h/hadoop.py | 2 | 5312 | ##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing Hadoop, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import glob
import os
import re
import shutil
from easybuild.easyblocks.generic.tarball import Tarball
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_Hadoop(Tarball):
"""Support for building/installing Hadoop."""
@staticmethod
def extra_options():
"""Custom easyconfig parameters for Hadoop."""
extra_vars = {
'build_native_libs': [False, "Build native libraries", CUSTOM],
'extra_native_libs': [[], "Extra native libraries to install (list of tuples)", CUSTOM],
}
return Tarball.extra_options(extra_vars)
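    # A hypothetical easyconfig snippet using these parameters (illustration only,
    # not from the original sources; library names and glob patterns are made up):
    #   build_native_libs = True
    #   extra_native_libs = [('snappy', 'lib64/libsnappy*'), ('zlib', 'lib/libz*')]
    # Each tuple names an EasyBuild dependency and a glob (relative to its install
    # prefix) of shared objects to copy into Hadoop's lib/native directory.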
def build_step(self):
"""Custom build procedure for Hadoop: build native libraries, if requested."""
if self.cfg['build_native_libs']:
cmd = "mvn package -DskipTests -Dmaven.javadoc.skip -Dtar -Pdist,native"
# Building snappy, bzip2 jars w/ native libs requires -Drequire.snappy -Drequire.bzip2, etc.
for native_lib, lib_path in self.cfg['extra_native_libs']:
lib_root = get_software_root(native_lib)
if not lib_root:
raise EasyBuildError("%s not found. Failing install" % native_lib)
cmd += ' -Drequire.%s=true -D%s.prefix=%s' % (native_lib, native_lib, lib_root)
if self.cfg['parallel'] > 1:
cmd += " -T%d" % self.cfg['parallel']
run_cmd(cmd, log_all=True, simple=True, log_ok=True)
def install_step(self):
"""Custom install procedure for Hadoop: install-by-copy."""
if self.cfg['build_native_libs']:
src = os.path.join(self.cfg['start_dir'], 'hadoop-dist', 'target', 'hadoop-%s' % self.version)
super(EB_Hadoop, self).install_step(src=src)
else:
super(EB_Hadoop, self).install_step()
def post_install_step(self):
"""After the install, copy the extra native libraries into place."""
for native_library, lib_path in self.cfg['extra_native_libs']:
lib_root = get_software_root(native_library)
lib_src = os.path.join(lib_root, lib_path)
lib_dest = os.path.join(self.installdir, 'lib', 'native')
self.log.info('Copying shared objects in "%s"', lib_src)
for lib in glob.glob(lib_src):
self.log.info('Copying "%s" to "%s"', lib, lib_dest)
shutil.copy2(lib, lib_dest)
def sanity_check_step(self):
"""Custom sanity check for Hadoop."""
native_files = []
if self.cfg['build_native_libs']:
native_files = ['lib/native/libhadoop.%s' % get_shared_lib_ext()]
custom_paths = {
'files': ['bin/hadoop'] + native_files,
'dirs': ['etc', 'libexec'],
}
super(EB_Hadoop, self).sanity_check_step(custom_paths=custom_paths)
fake_mod_data = self.load_fake_module(purge=True)
# exit code is ignored, since this cmd exits with 1 if not all native libraries were found
cmd = "hadoop checknative -a"
out, _ = run_cmd(cmd, simple=False, log_all=False, log_ok=False)
self.clean_up_fake_module(fake_mod_data)
not_found = []
installdir = os.path.realpath(self.installdir)
lib_src = os.path.join(installdir, 'lib', 'native')
for native_lib, _ in self.cfg['extra_native_libs']:
if not re.search(r'%s: *true *%s' % (native_lib, lib_src), out):
not_found.append(native_lib)
if not_found:
raise EasyBuildError("%s not found by 'hadoop checknative -a'.", ', '.join(not_found))
def make_module_extra(self):
"""Custom extra module file entries for Hadoop."""
txt = super(EB_Hadoop, self).make_module_extra()
mapreduce_subdir = os.path.join('share', 'hadoop', 'mapreduce')
txt += self.module_generator.prepend_paths('HADOOP_HOME', mapreduce_subdir)
return txt
| gpl-2.0 |
AnishShah/tensorflow | tensorflow/contrib/gan/python/losses/python/tuple_losses.py | 73 | 1231 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TFGAN utilities for loss functions that accept GANModel namedtuples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.losses.python import tuple_losses_impl
from tensorflow.contrib.gan.python.losses.python.tuple_losses_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = tuple_losses_impl.__all__
remove_undocumented(__name__, __all__)
| apache-2.0 |
rlr/kitsune | kitsune/sumo/tests/test_templates.py | 13 | 3924 | from django.conf import settings
from django.test.client import RequestFactory
from django.utils import translation
import jingo
import mock
from nose.tools import eq_
from pyquery import PyQuery as pq
from kitsune.sumo.tests import LocalizingClient, TestCase
from kitsune.sumo.urlresolvers import reverse
def setup():
jingo.load_helpers()
def test_breadcrumb():
"""Make sure breadcrumb links start with /."""
c = LocalizingClient()
response = c.get(reverse('search'))
doc = pq(response.content)
href = doc('.breadcrumbs a')[0]
eq_('/', href.attrib['href'][0])
class MockRequestTests(TestCase):
"""Base class for tests that need a mock request"""
def setUp(self):
super(MockRequestTests, self).setUp()
request = RequestFactory()
request.GET = {}
request.LANGUAGE_CODE = 'en-US'
self.request = request
class BaseTemplateTests(MockRequestTests):
"""Tests for base.html"""
def setUp(self):
super(BaseTemplateTests, self).setUp()
self.template = 'base.html'
def test_dir_ltr(self):
"""Make sure dir attr is set to 'ltr' for LTR language."""
html = jingo.render_to_string(self.request, self.template)
eq_('ltr', pq(html)('html').attr['dir'])
def test_dir_rtl(self):
"""Make sure dir attr is set to 'rtl' for RTL language."""
translation.activate('he')
self.request.LANGUAGE_CODE = 'he'
html = jingo.render_to_string(self.request, self.template)
eq_('rtl', pq(html)('html').attr['dir'])
translation.deactivate()
def test_multi_feeds(self):
"""Ensure that multiple feeds are put into the page when set."""
feed_urls = (('/feed_one', 'First Feed'),
('/feed_two', 'Second Feed'),)
doc = pq(jingo.render_to_string(self.request, self.template, {
'feeds': feed_urls}))
feeds = doc('link[type="application/atom+xml"]')
eq_(2, len(feeds))
eq_('First Feed', feeds[0].attrib['title'])
eq_('Second Feed', feeds[1].attrib['title'])
def test_readonly_attr(self):
html = jingo.render_to_string(self.request, self.template)
doc = pq(html)
eq_('false', doc('body')[0].attrib['data-readonly'])
@mock.patch.object(settings._wrapped, 'READ_ONLY', True)
def test_readonly_login_link_disabled(self):
"""Ensure that login/register links are hidden in READ_ONLY."""
html = jingo.render_to_string(self.request, self.template)
doc = pq(html)
eq_(0, len(doc('a.sign-out, a.sign-in')))
@mock.patch.object(settings._wrapped, 'READ_ONLY', False)
def test_not_readonly_login_link_enabled(self):
"""Ensure that login/register links are visible in not READ_ONLY."""
html = jingo.render_to_string(self.request, self.template)
doc = pq(html)
assert len(doc('a.sign-out, a.sign-in')) > 0
class ErrorListTests(MockRequestTests):
"""Tests for errorlist.html, which renders form validation errors."""
def test_escaping(self):
"""Make sure we escape HTML entities, lest we court XSS errors."""
class MockForm(object):
errors = True
auto_id = 'id_'
def visible_fields(self):
return [{'errors': ['<"evil&ness-field">']}]
def non_field_errors(self):
return ['<"evil&ness-non-field">']
source = ("""{% from "layout/errorlist.html" import errorlist %}"""
"""{{ errorlist(form) }}""")
html = jingo.render_to_string(self.request,
jingo.env.from_string(source),
{'form': MockForm()})
assert '<"evil&ness' not in html
assert '<"evil&ness-field">' in html
assert '<"evil&ness-non-field">' in html
| bsd-3-clause |
pcu4dros/pandora-core | workspace/lib/python3.5/site-packages/flask_migrate/templates/flask-multidb/env.py | 31 | 5448 | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool, MetaData
from logging.config import fileConfig
import logging
USE_TWOPHASE = False
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
logger = logging.getLogger('alembic.env')
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
from flask import current_app
config.set_main_option('sqlalchemy.url',
current_app.config.get('SQLALCHEMY_DATABASE_URI'))
bind_names = []
for name, url in current_app.config.get("SQLALCHEMY_BINDS").items():
context.config.set_section_option(name, "sqlalchemy.url", url)
bind_names.append(name)
target_metadata = current_app.extensions['migrate'].db.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def get_metadata(bind):
"""Return the metadata for a bind."""
if bind == '':
bind = None
m = MetaData()
for t in target_metadata.tables.values():
if t.info.get('bind_key') == bind:
t.tometadata(m)
return m
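# Illustration (an assumption about the application's models, not something this
# script generates): a Flask-SQLAlchemy model declared with a bind key, e.g.
#   class User(db.Model):
#       __bind_key__ = 'users'
#       id = db.Column(db.Integer, primary_key=True)
# carries info={'bind_key': 'users'} on its Table, so get_metadata('users')
# collects it, while get_metadata('') gathers the tables of the default database.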
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
# for the --sql use case, run migrations for each URL into
# individual files.
engines = {'': {'url': context.config.get_main_option('sqlalchemy.url')}}
for name in bind_names:
engines[name] = rec = {}
rec['url'] = context.config.get_section_option(name,
"sqlalchemy.url")
for name, rec in engines.items():
logger.info("Migrating database %s" % (name or '<default>'))
file_ = "%s.sql" % name
logger.info("Writing output to %s" % file_)
with open(file_, 'w') as buffer:
context.configure(url=rec['url'], output_buffer=buffer,
target_metadata=get_metadata(name),
literal_binds=True)
with context.begin_transaction():
context.run_migrations(engine_name=name)
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
# this callback is used to prevent an auto-migration from being generated
# when there are no changes to the schema
# reference: http://alembic.readthedocs.org/en/latest/cookbook.html
def process_revision_directives(context, revision, directives):
if getattr(config.cmd_opts, 'autogenerate', False):
script = directives[0]
if len(script.upgrade_ops_list) >= len(bind_names) + 1:
empty = True
for upgrade_ops in script.upgrade_ops_list:
if not upgrade_ops.is_empty():
empty = False
if empty:
directives[:] = []
logger.info('No changes in schema detected.')
# for the direct-to-DB use case, start a transaction on all
# engines, then run all migrations, then commit all transactions.
engines = {'': {'engine': engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)}}
for name in bind_names:
engines[name] = rec = {}
rec['engine'] = engine_from_config(
context.config.get_section(name),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
for name, rec in engines.items():
engine = rec['engine']
rec['connection'] = conn = engine.connect()
if USE_TWOPHASE:
rec['transaction'] = conn.begin_twophase()
else:
rec['transaction'] = conn.begin()
try:
for name, rec in engines.items():
logger.info("Migrating database %s" % (name or '<default>'))
context.configure(
connection=rec['connection'],
upgrade_token="%s_upgrades" % name,
downgrade_token="%s_downgrades" % name,
target_metadata=get_metadata(name),
process_revision_directives=process_revision_directives,
**current_app.extensions['migrate'].configure_args
)
context.run_migrations(engine_name=name)
if USE_TWOPHASE:
for rec in engines.values():
rec['transaction'].prepare()
for rec in engines.values():
rec['transaction'].commit()
except:
for rec in engines.values():
rec['transaction'].rollback()
raise
finally:
for rec in engines.values():
rec['connection'].close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| mit |
lucperkins/heron | heron/common/src/python/utils/log.py | 1 | 3380 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' log.py '''
import logging
from logging.handlers import RotatingFileHandler
import colorlog
# Create the logger
# pylint: disable=invalid-name
logging.basicConfig()
Log = logging.getLogger()
# time formatter - date - time - UTC offset
# e.g. "08/16/1988 21:30:00 +1030"
# see time formatter documentation for more
date_format = "%Y-%m-%d %H:%M:%S %z"
def configure(level=logging.INFO, logfile=None):
""" Configure logger which dumps log on terminal
:param level: logging level: info, warning, verbose...
:type level: logging level
:param logfile: log file name, default to None
:type logfile: string
:return: None
:rtype: None
"""
# Remove all the existing StreamHandlers to avoid duplicate
  # iterate over a copy: removing items from a list while iterating over it can skip entries
  for handler in list(Log.handlers):
    if isinstance(handler, logging.StreamHandler):
      Log.handlers.remove(handler)
Log.setLevel(level)
# if logfile is specified, FileHandler is used
if logfile is not None:
log_format = "[%(asctime)s] [%(levelname)s]: %(message)s"
formatter = logging.Formatter(fmt=log_format, datefmt=date_format)
file_handler = logging.FileHandler(logfile)
file_handler.setFormatter(formatter)
Log.addHandler(file_handler)
# otherwise, use StreamHandler to output to stream (stdout, stderr...)
else:
log_format = "[%(asctime)s] %(log_color)s[%(levelname)s]%(reset)s: %(message)s"
# pylint: disable=redefined-variable-type
formatter = colorlog.ColoredFormatter(fmt=log_format, datefmt=date_format)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
Log.addHandler(stream_handler)
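# Illustrative usage sketch, not part of the original module: how configure() might be
# called at process start-up. The log file path below is an assumption.
#
#   configure(logging.DEBUG)                            # colored output on the console
#   configure(logging.INFO, logfile="/tmp/heron.log")   # plain output appended to a file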
def init_rotating_logger(level, logfile, max_files, max_bytes):
"""Initializes a rotating logger
It also makes sure that any StreamHandler is removed, so as to avoid stdout/stderr
constipation issues
"""
logging.basicConfig()
root_logger = logging.getLogger()
log_format = "[%(asctime)s] [%(levelname)s] %(filename)s: %(message)s"
root_logger.setLevel(level)
handler = RotatingFileHandler(logfile, maxBytes=max_bytes, backupCount=max_files)
handler.setFormatter(logging.Formatter(fmt=log_format, datefmt=date_format))
root_logger.addHandler(handler)
  # iterate over a copy: removing items from a list while iterating over it can skip entries
  for handler in list(root_logger.handlers):
    root_logger.debug("Associated handlers - " + str(handler))
    if isinstance(handler, logging.StreamHandler):
      root_logger.debug("Removing StreamHandler: " + str(handler))
      root_logger.handlers.remove(handler)
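# Illustrative usage sketch, not part of the original module: a rotating log capped at
# roughly 10 MB per file with 5 backups. The path and limits are assumptions.
#
#   init_rotating_logger(logging.INFO, "/tmp/heron-instance.log",
#                        max_files=5, max_bytes=10 * 1024 * 1024)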
def set_logging_level(cl_args):
"""simply set verbose level based on command-line args
:param cl_args: CLI arguments
:type cl_args: dict
:return: None
:rtype: None
"""
if 'verbose' in cl_args and cl_args['verbose']:
configure(logging.DEBUG)
else:
configure(logging.INFO)
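# Illustrative usage sketch, not part of the original module: cl_args is the parsed
# command-line dictionary passed in by callers; the literal below is an assumption.
#
#   set_logging_level({'verbose': True})   # switches the root logger to DEBUG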
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/confidentialledger/azure-confidentialledger/azure/confidentialledger/_generated/_generated_identity/v0_1_preview/_confidential_ledger_identity_service_client.py | 1 | 3766 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import ConfidentialLedgerIdentityServiceClientConfiguration
from .operations import ConfidentialLedgerIdentityServiceOperations
from . import models
class ConfidentialLedgerIdentityServiceClient(object):
"""The ConfidentialLedgerIdentityServiceClient is used to retrieve the TLS certificate required for connecting to a Confidential Ledger.
:ivar confidential_ledger_identity_service: ConfidentialLedgerIdentityServiceOperations operations
    :vartype confidential_ledger_identity_service: azure.confidentialledger._generated._generated_identity.v0_1_preview.operations.ConfidentialLedgerIdentityServiceOperations
:param identity_service_uri: The Identity Service URL, for example https://identity.accledger.azure.com.
:type identity_service_uri: str
"""
def __init__(
self,
identity_service_uri, # type: str
**kwargs # type: Any
):
# type: (...) -> None
base_url = '{identityServiceUri}'
self._config = ConfidentialLedgerIdentityServiceClientConfiguration(identity_service_uri, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.confidential_ledger_identity_service = ConfidentialLedgerIdentityServiceOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'identityServiceUri': self._serialize.url("self._config.identity_service_uri", self._config.identity_service_uri, 'str', skip_quote=True),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> ConfidentialLedgerIdentityServiceClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
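# Illustrative usage sketch, not part of the generated client: the service URL and the
# request path below are assumptions, shown only to indicate how _send_request is wired.
#
#   from azure.core.pipeline.transport import HttpRequest
#
#   with ConfidentialLedgerIdentityServiceClient("https://identity.accledger.azure.com") as client:
#       request = HttpRequest("GET", "/ledgerIdentity/myledger")
#       response = client._send_request(request, stream=False)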
| mit |
basicthinker/THNVM | src/python/m5/config.py | 91 | 2085 | # Copyright (c) 2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import os
from os.path import isdir, isfile, join as joinpath
confdir = os.environ.get('M5_CONFIG')
if not confdir:
# HOME is not set when running regressions, due to use of scons
# Execute() function.
homedir = os.environ.get('HOME')
if homedir and isdir(joinpath(homedir, '.m5')):
confdir = joinpath(homedir, '.m5')
def get(name):
if not confdir:
return None
conffile = joinpath(confdir, name)
if not isfile(conffile):
return None
return conffile
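# Illustrative usage sketch, not part of the original module: look up an optional
# per-user configuration file; the file name is an assumption.
#
#   conffile = get('mysim.ini')
#   if conffile is not None:
#       # read settings from conffile here
#       pass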
| bsd-3-clause |
robmcdan/python-goose | goose/extractors/__init__.py | 18 | 1179 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class BaseExtractor(object):
def __init__(self, config, article):
# config
self.config = config
# parser
self.parser = self.config.get_parser()
# article
self.article = article
# stopwords class
self.stopwords_class = config.stopwords_class
| apache-2.0 |
chhao91/QGIS | tests/src/python/test_qgsblendmodes.py | 7 | 7978 | # -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgsblendmodes.py
---------------------
Date : May 2013
Copyright : (C) 2013 by Nyall Dawson, Massimo Endrighi
Email : nyall dot dawson at gmail.com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'May 2013'
__copyright__ = '(C) 2013, Nyall Dawson, Massimo Endrighi'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis
import os
from PyQt4.QtCore import QSize
from PyQt4.QtGui import QPainter, QColor
from qgis.core import (QgsVectorLayer,
QgsVectorSimplifyMethod,
QgsMapLayerRegistry,
QgsMultiRenderChecker,
QgsRasterLayer,
QgsMultiBandColorRenderer,
QgsRectangle
)
from utilities import (unitTestDataPath,
getQgisTestApp,
TestCase,
unittest
)
# Convenience instances in case you may need them
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsBlendModes(TestCase):
def __init__(self, methodName):
"""Run once on class initialisation."""
unittest.TestCase.__init__(self, methodName)
# initialize class MapRegistry, Canvas, MapRenderer, Map and PAL
self.mMapRegistry = QgsMapLayerRegistry.instance()
# create point layer
myShpFile = os.path.join(TEST_DATA_DIR, 'points.shp')
self.mPointLayer = QgsVectorLayer(myShpFile, 'Points', 'ogr')
self.mMapRegistry.addMapLayer(self.mPointLayer)
self.mSimplifyMethod = QgsVectorSimplifyMethod()
self.mSimplifyMethod.setSimplifyHints(QgsVectorSimplifyMethod.NoSimplification)
# create polygon layer
myShpFile = os.path.join(TEST_DATA_DIR, 'polys.shp')
self.mPolygonLayer = QgsVectorLayer(myShpFile, 'Polygons', 'ogr')
self.mPolygonLayer.setSimplifyMethod(self.mSimplifyMethod)
self.mMapRegistry.addMapLayer(self.mPolygonLayer)
# create line layer
myShpFile = os.path.join(TEST_DATA_DIR, 'lines.shp')
self.mLineLayer = QgsVectorLayer(myShpFile, 'Lines', 'ogr')
self.mLineLayer.setSimplifyMethod(self.mSimplifyMethod)
self.mMapRegistry.addMapLayer(self.mLineLayer)
# create two raster layers
myRasterFile = os.path.join(TEST_DATA_DIR, 'rgb256x256.png')
self.mRasterLayer1 = QgsRasterLayer(myRasterFile, "raster1")
self.mRasterLayer2 = QgsRasterLayer(myRasterFile, "raster2")
myMultiBandRenderer1 = QgsMultiBandColorRenderer(self.mRasterLayer1.dataProvider(), 1, 2, 3)
self.mRasterLayer1.setRenderer(myMultiBandRenderer1)
self.mMapRegistry.addMapLayer(self.mRasterLayer1)
myMultiBandRenderer2 = QgsMultiBandColorRenderer(self.mRasterLayer2.dataProvider(), 1, 2, 3)
self.mRasterLayer2.setRenderer(myMultiBandRenderer2)
self.mMapRegistry.addMapLayer(self.mRasterLayer2)
# to match blend modes test comparisons background
self.mCanvas = CANVAS
self.mCanvas.setCanvasColor(QColor(152, 219, 249))
self.mMap = self.mCanvas.map()
self.mMap.resize(QSize(400, 400))
self.mapSettings = self.mCanvas.mapSettings()
self.mapSettings.setOutputSize(QSize(400, 400))
self.mapSettings.setOutputDpi(96)
self.extent = QgsRectangle(-118.8888888888887720, 22.8002070393376783, -83.3333333333331581, 46.8719806763287536)
def testVectorBlending(self):
"""Test that blend modes work for vector layers."""
#Add vector layers to map
myLayers = []
myLayers.append(self.mLineLayer.id())
myLayers.append(self.mPolygonLayer.id())
self.mapSettings.setLayers(myLayers)
self.mapSettings.setExtent(self.extent)
#Set blending modes for both layers
self.mLineLayer.setBlendMode(QPainter.CompositionMode_Difference)
self.mPolygonLayer.setBlendMode(QPainter.CompositionMode_Difference)
checker = QgsMultiRenderChecker()
checker.setControlName("expected_vector_blendmodes")
checker.setMapSettings(self.mapSettings)
checker.setColorTolerance(1)
myResult = checker.runTest("vector_blendmodes", 20)
myMessage = ('vector blending failed')
assert myResult, myMessage
#Reset layers
self.mLineLayer.setBlendMode(QPainter.CompositionMode_SourceOver)
self.mPolygonLayer.setBlendMode(QPainter.CompositionMode_SourceOver)
def testVectorFeatureBlending(self):
"""Test that feature blend modes work for vector layers."""
#Add vector layers to map
myLayers = []
myLayers.append(self.mLineLayer.id())
myLayers.append(self.mPolygonLayer.id())
self.mapSettings.setLayers(myLayers)
self.mapSettings.setExtent(self.extent)
#Set feature blending for line layer
self.mLineLayer.setFeatureBlendMode(QPainter.CompositionMode_Plus)
checker = QgsMultiRenderChecker()
checker.setControlName("expected_vector_featureblendmodes")
checker.setMapSettings(self.mapSettings)
checker.setColorTolerance(1)
myResult = checker.runTest("vector_featureblendmodes", 20)
myMessage = ('vector feature blending failed')
assert myResult, myMessage
#Reset layers
self.mLineLayer.setFeatureBlendMode(QPainter.CompositionMode_SourceOver)
def testVectorLayerTransparency(self):
"""Test that layer transparency works for vector layers."""
#Add vector layers to map
myLayers = []
myLayers.append(self.mLineLayer.id())
myLayers.append(self.mPolygonLayer.id())
self.mapSettings.setLayers(myLayers)
self.mapSettings.setExtent(self.extent)
#Set feature blending for line layer
self.mLineLayer.setLayerTransparency(50)
checker = QgsMultiRenderChecker()
checker.setControlName("expected_vector_layertransparency")
checker.setMapSettings(self.mapSettings)
checker.setColorTolerance(1)
myResult = checker.runTest("vector_layertransparency", 20)
myMessage = ('vector layer transparency failed')
assert myResult, myMessage
def testRasterBlending(self):
"""Test that blend modes work for raster layers."""
#Add raster layers to map
myLayers = []
myLayers.append(self.mRasterLayer1.id())
myLayers.append(self.mRasterLayer2.id())
self.mapSettings.setLayers(myLayers)
self.mapSettings.setExtent(self.mRasterLayer1.extent())
#Set blending mode for top layer
self.mRasterLayer1.setBlendMode(QPainter.CompositionMode_Difference)
checker = QgsMultiRenderChecker()
checker.setControlName("expected_raster_blendmodes")
checker.setMapSettings(self.mapSettings)
        checker.setColorTolerance(1)
myResult = checker.runTest("raster_blendmodes", 20)
myMessage = ('raster blending failed')
assert myResult, myMessage
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
wd5/jangr | django/conf/locale/fr/formats.py | 232 | 1530 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%d.%m.%Y', '%d.%m.%y', # Swiss (fr_CH), '25.10.2006', '25.10.06'
'%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
# '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d.%m.%Y %H:%M:%S', # Swiss (fr_CH), '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # Swiss (fr_CH), '25.10.2006 14:30'
'%d.%m.%Y', # Swiss (fr_CH), '25.10.2006'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
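# Illustrative sketch, not part of the original file: the first DATE_INPUT_FORMATS entry
# corresponds to parsing strings such as '25/10/2006', e.g.
#
#   import datetime
#   datetime.datetime.strptime('25/10/2006', '%d/%m/%Y').date()  # datetime.date(2006, 10, 25)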
| bsd-3-clause |
marcoscaceres/bedrock | bedrock/grants/grants_db.py | 43 | 39061 | # -*- coding: utf-8 -*-
from collections import namedtuple
Grant = namedtuple('Grant', 'url, grantee, location, title, type, total_support, '
'year, description, break_down, urls')
GRANTS = [
Grant(
u'dream-yard',
u'DreamYard Project',
u'United States',
u'Hive Fashion DreamYard Summer Intensive',
u'learning-webmaking',
u'$8,250',
2012,
u'<p> Mozilla provided a grant to <a href="http://www.dreamyard.com/">'
u'DreamYard Arts Center</a> in the Bronx, NY, in conjunction with '
u'<a href="http://explorecreateshare.org/2012/07/20/'
u'next-seasons-hottest-trend-hive-fashion/">Hive Fashion</a>, '
u'to support a DIY Fashion intensive for teens in August 2012.</p>',
u'',
u'',
),
Grant(
u'compumentor',
u'Compumentor',
u'United States',
u'2007 TechSoup Netsquared Conference',
u'free-culture-community',
u'$2,000',
2007,
u'<p>Mozilla contributed to the 2007 TechSoup <a href="http://www.netsquared.org">'
u'Netsquared Conference</a> Innovation Fund to support innovative software applications '
u'created by and for non-profit organizations.</p>',
u'',
u'',
),
Grant(
u'codethink',
u'Codethink Ltd.',
u'United Kingdom',
u'Accessibility Research',
u'open-source-technology',
u'$4,427',
2007,
u'<p>Mozilla made a grant to <a href="http://www.codethink.co.uk/">Codethink Ltd.</a> '
u'to do a feasibility study for migrating the AT-SPI accessibility '
u'interface to use D-Bus.</p>',
u'',
u'',
),
Grant(
u'charles-chen',
u'Charles Chen',
u'United States',
u'Fire Vox',
u'open-source-technology',
u'$11,976',
2007,
u'<p>Mozilla supported the work of Charles Chen to implement ARIA widgets in the '
u'<a href="http://www.accessfirefox.org/Fire_Vox.php">Fire Vox</a> open source '
u'screen reader extension for Firefox.</p>',
u'',
u'',
),
Grant(
u'ariel-rios',
u'Ariel Rios',
u'United States',
u'GNOME Accessibility',
u'open-source-technology',
u'$12,471',
2007,
u'<p>Mozilla supported the work of Ariel Rios to implement the AT-SPI Collection '
u'interface for better Firefox accessibility on Linux.</p>',
u'',
u'',
),
Grant(
u'aapd',
u'American Association of People with Disabilities',
u'United States',
u'AAPD',
u'free-culture-community',
u'$1,000',
2007,
u'<p>Mozilla sponsored the <a href="http://www.aapd.com/">AAPD</a> Leadership Gala '
u'and related activities.</p>',
u'',
u'',
),
Grant(
u'peoples-production-house',
u'People’s Production House',
u'United States',
u'World’s Fair 2.0 Design Intensive Incubator',
u'learning-webmaking',
u'$14,500',
2012,
u'<p>This grant to the <a href="http://peoplesproductionhouse.org/">'
u'People’s Production House</a> supported the implementation of three '
u'design workshops for youth in conjunction with World’s Fair 2.0, a '
u'cell-phone based journalism scavenger hunt that investigates the borough '
u'of Queens’ history - past and present. The final Design Intensive '
u'took place during Maker Faire, and involved youth in the installation of '
u'their work at the New York Hall of Science.</p>',
u'',
u'',
),
Grant(
u'participatory-culture-foundation',
u'Participatory Culture Foundation',
u'United States',
u'NewsHour Open Election 2012',
u'free-culture-community',
u'$266,530.42',
2012,
u'<p>As part of the NewsHour Open Election 2012 project, supported by the '
u'Corporation for Public Broadcasting, and in partnership with PBS NewsHour and '
u'Mozilla, the <a href="http://pculture.org/">Participatory Culture Foundation</a> '
u'has received support to develop crowd-sourcing technologies to enable citizen '
u'volunteers to translate and caption 2012 election coverage into dozens of languages, '
u'as well as for the deaf and hard-of-hearing. These technologies will make election '
u'news, speeches and debates more accessible for diverse audiences, helping to increase '
u'their understanding of, and engagement in, the political process.</p>',
u'',
u'',
),
Grant(
u'global-kids-inc',
u'Global Kids Inc.',
u'United States',
u'PenPal News',
u'learning-webmaking',
u'$15,000',
2012,
u'<p> Mozilla provided a grant to <a href="http://www.globalkids.org/">'
u'Global Kids Inc.</a>, in conjunction with Hive NYC, for the development of '
u'PenPal News software. PenPal News is a web app that uses news as a '
u'conversation-starter to connect middle and high school-aged youth '
u'domestically and internationally.</p>',
u'',
u'',
),
Grant(
u'public_knowledge',
u'Public Knowledge',
u'United States',
u'Public Knowledge',
u'user-sovereignty',
u'$5,000',
2012,
u'<p><a href="http://www.publicknowledge.org/">Public Knowledge</a> preserves the '
u'openness of the Internet and the public’s access to knowledge, promotes creativity '
u'through balanced copyright, and upholds and protects the rights of consumers to use '
u'innovative technology lawfully.</p>',
u'',
u'',
),
Grant(
u'institute_of_play',
u'Institute of Play',
u'United States',
u'Hive Activity Delivery Mechanism',
u'learning-webmaking',
u'$12,604',
2012,
u'<p>This grant to the <a href="http://www.instituteofplay.org/">Institute of Play</a> '
u'provided support for the Hive Activity Delivery Mechanism Project, which seeks to '
u'develop a sharing model for Hive-developed learning activities that represents the '
u'collaboration, experimentation and youth-focus that typifies the '
u'Hive and its members.</p>',
u'',
u'',
),
Grant(
u'cbc',
u'CBC Radio Canada',
u'Canada',
u'Marshall McLuhan Project',
u'free-culture-community',
u'$10,000',
2011,
u'<p>This grant was given to the <a href="http://www.cbc.ca">'
u'Canadian Broadcasting Corporation</a> to support the creation of on-line '
u'content to engage Canadians in the celebration of the 100th anniversary of '
u'the birth of Marshall McLuhan.</p>',
u'',
u'',
),
Grant(
u'big-blue-button',
u'Blindside Networks',
u'Canada',
u'BigBlueButton',
u'open-source-technology',
u'$11,000',
2011,
u'<p><a href="http://www.blindsidenetworks.com/">Blindside Networks</a> '
u'is a company dedicated to helping universities, colleges, and commercial '
u'companies deliver a high-quality learning experience to remote students. '
u'The goal of the BigBlueButton open source project is to enable remote students '
u'to have a high-quality learning experience. This grant supported converting '
u'BigBlueButton 0.8-beta to use popcorn.js, the HTML5 media framework designed '
u'for synchronized playback of media.</p>',
u'',
u'',
),
Grant(
u'depaul-university',
u'DePaul University',
u'United States',
u'Digital Youth Mentor',
u'learning-webmaking',
u'$25,000',
2011,
u'<p>This grant was made to <a href="http://www.depaul.edu">DePaul University</a> '
u'to support the employment of a Digital Youth Mentor.</p>',
u'',
u'',
),
Grant(
u'new-youth-city',
u'New Youth City Learning Network',
u'United States',
u'Hackasaurus',
u'learning-webmaking',
u'$25,000',
2011,
u'<p>This grant to the <a href="http://dmlcentral.net/projects/3658">'
u'New Youth City Learning Network</a> at the Social Science Research Centre '
u'supported the development of Hackasaurus. Hackasaurus is a set of tools that '
u'are under development to help teenagers closely review, remix and redesign '
u'the Web. Hackasaurus was prototyped with youth over the course of several '
u'workshops and jam days in New York and Chicago.</p>',
u'',
u'',
),
Grant(
u'henrik-moltke',
u'Henrik Moltke',
u'Germany',
u'Hyperaudio',
u'free-culture-community',
u'$10,000',
2011,
u'<p>This grant supported the development of a compelling concept and implementation '
u'plan for the <a href="http://www.hyperaudio.org/">Hyperaudio</a> project.</p>',
u'',
u'',
),
Grant(
u'bay-area-video-coalition',
u'Bay Area Video Coalition',
u'United States',
u'Zero Divide/Mozilla Youth Media Project',
u'open-source-technology',
u'$88,500',
2012,
u'<p>The <a href="http://www.bavc.org/">Bay Area Video Coalition (BAVC)</a> '
u'was an implementation partner in the Mozilla Foundation/Zero Divide youth '
u'media project in 2011. They worked together to test software prototypes for '
u'Butter, a user interface for WebMadeMovies; to instruct and lead youth '
u'participants to create 3-4 web-native productions with these tools; and to '
u'create a modular, openly-licensed curriculum to make it easier for people to '
u'create HTML5/open video projects of their own.</p><p>In 2012, Mozilla provided '
u'a grant to BAVC to support the <a href="http://bavc.org/creative_code">'
u'Open Source track at BAVC’s Digital Pathways</a>, as part of a broader partnership '
u'between BAVC and Mozilla to encourage next-generation integrated '
u'learning and career skills.</p>',
{
u'2011': ['Amount: $73,500'],
u'2012': ['Amount: $15,000']
},
u'',
),
Grant(
u'universal-subtitles',
u'Universal Subtitles',
u'United States',
u'Universal Subtitles',
u'free-culture-community',
u'$100,000',
2011,
u'<p>In 2011, Mozilla provided a grant to support the development of '
u'<a href="http://www.universalsubtitles.org">Universal Subtitles</a> '
u'(now known as Amara). Amara gives individuals, communities, and larger '
u'organizations the power to overcome accessibility and language barriers '
u'for online video. The tools are free and open source and make the work of '
u'subtitling and translating video simpler, more appealing, and, most of all, '
u'more collaborative.</p>',
u'',
u'',
),
Grant(
u'adaptive-technology-resource-centre',
u'Adaptive Technology Resource Centre',
u'Canada',
u'Adaptive Technology Resource Centre',
u'open-source-technology',
u'$10,000',
2006,
u'<p>This grant was made to the Adaptive Technology Resource Centre at '
u'the University of Toronto (now the <a href="http://idrc.ocad.ca/">'
u'Inclusive Design Research Centre</a> at the Ontario College of Art and Design). '
u'It enabled the development of an accessible Thunderbird user interface as well as '
u'its adoption through evangelism, mentoring, community-building, and technical '
u'leadership, with a focus on working with the jQuery community to implement ARIA '
u'support in this popular toolkit.</p>',
u'',
u'',
),
Grant(
u'benetech',
u'Benetech',
u'United States',
u'Benetech DAISY Reader for Firefox',
u'free-culture-community',
u'$50,000',
2009,
u'<p>Mozilla provided funding over two years to <a href="http://www.benetech.org/">'
u'Benetech</a>, a corporation dedicated to leveraging technology innovation and '
u'business expertise to solve unmet social needs. This funding supports the development '
u'of an open source, browser-based DAISY reader that enables people with print '
u'disabilities to read accessible text using Firefox.</p>',
{
u'2008': ['Amount: $25,000'],
u'2009': ['Amount: $25,000']
},
u'',
),
Grant(
u'nvda',
u'NV Access',
u'Australia',
u'NVDA Screen Reader',
u'open-source-technology',
u'$135,000',
2010,
u'<p>Mozilla made grants to <a href="http://www.nvaccess.org/">NV Access</a> '
u'from 2007 to 2010 to support the development of '
u'<a href="http://www.nvda-project.org/">NonVisual Desktop Access (NVDA)</a>, '
u'a free and open source screen reader for the Microsoft Windows operating system. '
u'Providing feedback via synthetic speech and Braille, it enables blind or vision '
u'impaired people to access computers running Windows for no more '
u'cost than a sighted person.</p>',
{
u'2007': ['Initial Support: $10,000', 'Support for full time work of James Teh: $80,000'],
u'2009': ['Expanding work: $25,000'],
u'2010': ['Growing influence: $20,000']
},
[
u'http://www.nvda-project.org/blog/'
u'Mozilla_Foundation_grant_allows_for_employment_of_NVDA_full-time_developer',
u'http://www.nvda-project.org/blog/First_Work_on_Web_Access_Grant',
u'http://www.nvda-project.org/blog/NewMozillaGrantFurthersNVDA',
u'http://www.nvda-project.org/blog/NVDAPresentationAtCSUN2009'
]
),
Grant(
u'firebug-accessibility',
u'University of Illinois Urbana-Champaign & The Paciello Group ',
u'United States',
u'Firebug Accessibility',
u'open-source-technology',
u'$120,009',
2010,
u'<p>This grant provided funds to the <a href="http://illinois.edu/">'
u'University of Illinois Urbana-Champaign</a> and '
u'<a href="http://www.paciellogroup.com/">The Paciello Group</a> in 2009 '
u'and 2010 for their joint work on Firebug accessibility. The goal was to '
u'mainstream accessibility for web applications by building accessibility '
u'testing functions and associated test cases into '
u'<a href="http://getfirebug.com/">Firebug</a>, a popular tool used by many '
u'web developers.</p>',
{
u'2009': ['Phase One: $25,000', 'Phase Two: $25,000', 'Phase Three: $25,000'],
u'2010': ['Phase Four: $25,000', 'Phase Five: $20,009']
},
u'',
),
Grant(
u'vquence',
u'Vquence',
u'Australia',
u'Vquence',
u'open-source-technology',
u'$75,000',
2010,
u'<p>In the spring of 2008 Mozilla became concerned about the lack of '
u'support for deaf and blind Firefox users. Mozilla identified '
u'<a href="http://www.gingertech.net/">Dr. Silvia Pfeiffer</a> and her '
u'company Vquence as the best resource for creating a plan for open '
u'video accessibility. By providing grants in 2008, 2009 and 2010, '
u'Mozilla supported the technology that implemented Firefox video '
u'accessibility features, such as text subtitles for the hearing-impaired '
u'and audio descriptions for blind users.</p>',
{
u'2008': ['Amount: $25,000'],
u'2009': ['Amount: $25,000'],
u'2010': ['Amount: $25,000']
},
[
u'http://frankhecker.com/2009/06/30/new-mozilla-accessibility-projects/',
]
),
Grant(
u'web4all',
u'World Wide Web Consortium',
u'UK',
u'Web4All Conference',
u'free-culture-community',
u'$4,000',
2010,
u'<p>Mozilla has sponsored the <a href="http://www.w4a.info/">Web4All Conference</a> '
u'for several years, and has also sponsored several speakers to be able to attend. '
u'The Web4All Conference is an annual cross-disciplinary gathering focused on '
u'Scientific Enquiry, Research, Development and Engineering. Views bridge academia, '
u'commerce and industry, and arguments encompassing a range of beliefs across the '
u'design-accessibility spectrum are presented.</p>',
{
u'2007': ['Amount: $1,000'],
u'2008': ['Amount: $1,000'],
u'2009': ['Amount: $1,000'],
u'2010': ['Amount: $1,000'],
},
u'',
),
Grant(
u'creative-commons',
u'Creative Commons',
u'United States',
u'Creative Commons Pledge',
u'free-culture-community',
u'$300,000',
2010,
u'<p>In December 2007, Mozilla decided to participate in '
u'<a href="http://creativecommons.org/">Creative Commons</a> "5x5 Challenge." '
u'Beginning in 2008, Mozilla pledged $100,000 per year for five years to support '
u'open licensing on the web, developing hybrid organizations, and maturing the '
u'concept of the web as an ecology of shared ideas.</p>',
{
u'2008': ['Amount: $100,000'],
u'2009': ['Amount: $100,000'],
u'2010': ['Amount: $100,000'],
},
u'',
),
Grant(
u'foms',
u'Annodex Association',
u'Australia',
u'Foundations of Open Media Software Workshop',
u'free-culture-community',
u'$15,000',
2009,
u'<p>These grants provided sponsorship for the 2007, 2008 and 2009 '
u'<a href="http://www.foms-workshop.org">Foundations of Open Media Software (FOMS)</a> '
u'workshop in Hobart, Australia. The bulk of these funds were used to cover the travel '
u'expenses of key participants who otherwise would have been unable to attend. '
u'This meeting hosts important discussions on open codecs, HTML specifications, '
u'browsers and hands-on work towards specifications for video in browsers.</p>',
{
u'2007': ['Amount: $5,000'],
u'2008': ['Amount: $5,000'],
u'2009': ['Amount: $5,000']
},
u'',
),
Grant(
u'free-culture-conference',
u'Berkeley Center for Law and Technology',
u'United States',
u'Free Culture Conference',
u'free-culture-community',
u'$5,000',
2008,
u'<p>This grant provided sponsorship for the Free Culture Conference put '
u'on by the <a href="http://www.law.berkeley.edu/bclt.htm">'
u'Berkeley Center for Law and Technology</a>, held October 11 and 12, 2008 '
u'in Berkeley, California. The Free Culture Conference is a yearly touchstone '
u'event for the advancement of free cultures, where members are free to '
u'participate without artificial limits.</p>',
u'',
u'',
),
Grant(
u'fscons',
u'FFKP',
u'Sweden',
u'Free Society Conference and Nordic Summit',
u'free-culture-community',
u'$1,300',
2009,
u'<p>This grant provided sponsorship for the third '
u'<a href="https://fscons.org/2009/">Free Society Conference and '
u'Nordic Summit (FSCONS)</a> held November 13-15, 2009, in Goteborg, Sweden. '
u'FSCONS is jointly organized by Free Software Foundation Europe, '
u'Creative Commons and Wikipedia Sverige.</p>',
u'',
u'',
),
Grant(
u'free-software-foundation',
u'Free Software Foundation',
u'United States',
u'LinuxBIOS Support',
u'free-culture-community',
u'$10,000',
2007,
u'<p>In 2007, Mozilla provided $10,000 to support the LinuxBIOS-related '
u'activities of the <a href="http://www.fsf.org/">Free Software Foundation</a>. '
u'This grant went toward software development, infrastructure and communications. '
u'The Free Software Foundation ported coreboot to the alix.2c3 board, a board '
u'useful in building routers, firewalls, and wifi access points.</p>',
u'',
u'',
),
Grant(
u'gnome',
u'GNOME',
u'United States',
u'GNOME Accessibility',
u'open-source-technology',
u'$48,000',
2010,
u'<p>Mozilla offered grants in support of '
u'<a href="http://projects.gnome.org/outreach/a11y/">GNOME’s Outreach '
u'Program for Accessibility</a>. The <a href="http://www.gnome.org/">'
u'GNOME Foundation</a> sponsors the GNOME project to provide a free desktop '
u'environment for Linux systems. Mozilla and GNOME have been longtime '
u'collaborators on open source and accessibility issues.</p><p>See the '
u'<a href="reports/gnome-haeger-report/">grant final report</a> for more details.</p>',
{
u'2007': ['General Accessibility Support: $10,000'],
u'2008': ['Orca rich document browsing extension: $8,000'],
u'2009': ['GNOME Outreach Program: Accessibility: $10,000', 'CSUN Accessibility Conference: $10,000'],
u'2010': ['General Accessibility Support: $10,000']
},
[
u'https://blog.mozilla.org/blog/2010/02/04/mozilla-gnome-accessibility/',
]
),
Grant(
u'ifosslr',
u'International Free and Open Source Software Law Review (IFOSSLR)',
u'Europe',
u'IFOSSLR Launch',
u'user-sovereignty',
u'$10,000',
2009,
u'<p>This grant funded the launch of the <a href="http://www.ifosslr.org/">'
u'International Free and Open Source Software Law Review (IFOSSLR)</a>, a '
u'collaborative legal publication aiming to increase knowledge and understanding '
u'among lawyers about Free and Open Source Software issues. Topics included copyright, '
u'licence implementation, licence interpretation, software patents, open standards, '
u'case law and statutory changes.</p>',
u'',
u'',
),
Grant(
u'mozdev',
u'MozDev',
u'United States',
u'MozDev Support',
u'open-source-technology',
u'$90,000',
2008,
u'<p>Mozilla supported the <a href="http://www.mozdev.org/about.html">'
u'MozDev Community Organization</a> by providing general funds to support '
u'MozDev’s operations. MozDev is a software development community dedicated '
u'to making quality applications and extensions freely available to all computer '
u'users. Its goal is to help establish Mozilla as a viable application development '
u'platform. Since 2006, Mozilla grants have funded the majority of MozDev’s budget. '
u'This support gives back to the community that contributes so much to establishing '
u'Mozilla as a viable application development platform and the community that builds '
u'quality applications and extensions.</p>',
{
u'2006': ['Amount: $30,000'],
u'2007': ['Amount: $30,000'],
u'2008': ['Amount: $30,000']
},
u'',
),
Grant(
u'nonprofit-software-development-summit',
u'Aspiration',
u'United States',
u'Nonprofit Software Development Summit',
u'free-culture-community',
u'$5,000',
2009,
u'<p>This grant supported the <a href="http://www.aspirationtech.org/events/devsummit09">'
u'Nonprofit Software Development Summit</a>, held November 18-20, 2009 in Oakland. '
u'This was the third annual convening of people and organizations developing software '
u'tools, web applications and other technology to support nonprofits and social '
u'justice causes. <a href="http://www.aspirationtech.org/">Aspiration</a>, '
u'the conference organizer, is a non-profit organization that connects nonprofits '
u'with software solutions that help them better carry out their work.</p>',
u'',
u'',
),
Grant(
u'open-source-software-institute',
u'Open Source Software Institute',
u'United States',
u'OCSP Stapling',
u'open-source-technology',
u'$30,000',
2007,
u'<p>This grant to the <a href="http://www.oss-institute.org/">'
u'Open Source Software Institute</a>, in cooperation with the NSS '
u'development team and Mozilla developers, investigated the problem of '
u'providing OCSP stapling support for Apache and other open source '
u'SSL/TLS-enabled server software incorporating the OpenSSL library. '
u'The Open Source Software Institute (OSSI) was identified as having '
u'extensive experience with OpenSSL, and was the lead organization '
u'responsible for getting US government FIPS 140-2 validation of OpenSSL.</p>',
u'',
u'',
),
Grant(
u'open-video-alliance',
u'Open Video Alliance',
u'United States',
u'Open Video Alliance',
u'free-culture-community',
u'$30,000',
2009,
u'<p>Mozilla offered support to <a href="http://openvideoalliance.org/">'
u'Open Video Alliance</a> activities in support of the open video movement. '
u'Open Video Alliance is a coalition of organizations and individuals committed '
u'to the idea that the power of the moving image should belong to everyone. '
u'This grant funded various efforts in the open video movement, such as the '
u'operations of openvideoalliance.org, the branding of open video products, '
u'outreach to the public media, fundraising and video production.</p>',
u'',
u'',
),
Grant(
u'perl-foundation',
u'Perl Foundation',
u'United States',
u'Perl6 Support',
u'open-source-technology',
u'$10,000',
2007,
u'<p>Mozilla provided a grant to the <a href="http://www.perlfoundation.org/">'
u'Perl Foundation</a>, a non-profit dedicated to the advancement of the Perl '
u'programming language through open discussion, collaboration, design and code. '
u'This grant supported the development of Perl 6.</p>',
u'',
u'',
),
Grant(
u'personal-democracy-forum',
u'Personal Democracy Forum',
u'United States',
u'Personal Democracy Forum',
u'user-sovereignty',
u'$15,000',
2009,
u'<p>For two years Mozilla sponsored the <a href="http://personaldemocracy.com/'
u'pdf-conference/personal-democracy-forum-conference">Personal Democracy Forum</a>, '
u'a forum for discussion on how politics and technology intersect. Each year top '
u'opinion-makers, political practitioners, technologists and journalists come '
u'together to network, exchange ideas and explore how technology and the internet '
u'are changing politics, democracy and society.</p>',
{
u'2008': ['Amount: $10,000'],
u'2009': ['Amount: $5,000']
},
u'',
),
Grant(
u'software-freedom-conservancy',
u'Software Freedom Conservancy',
u'United States',
u'Software Freedom Conservancy',
u'free-culture-community',
u'$30,000',
2012,
u'<p>Mozilla provided funding to help the '
u'<a href="http://conservancy.softwarefreedom.org/">Software Freedom Conservancy</a> '
u'serve additional open source projects and work more closely with peer projects. '
u'As from 2008, Mozilla\'s funding helped the Conservancy to provide administrative, '
u'financial management, coordination and logistical services to twenty FLOSS '
u'(Free, Libre and Open Source Software) projects including Foresight Linux, '
u'Sugar Labs, jQuery, Amarok, Darcs, OpenInkpot, and K-3D.</p>',
{
u'2008': ['Amount: $10,000'],
u'2009': ['Amount: $10,000'],
u'2012': ['Amount: $10,000']
},
u'',
),
Grant(
u'seneca',
u'Seneca College',
u'Canada',
u'Seneca College',
u'learning-webmaking',
u'$327,860',
2011,
u'<p>Since 2005, <a href="http://www.senecac.on.ca/">Seneca College</a> '
u'in Toronto has worked closely with the Mozilla community to create a set '
u'of Mozilla-specific courses, engage hundreds of students directly in Mozilla '
u'development projects, and host and record dozens of Mozilla events and talks. '
u'Seneca’s faculty and students are key contributors to the Mozilla project, '
u'and have gained significant experience bootstrapping new contributors into the '
u'Mozilla technology and culture. Seneca College of Applied Arts and Technology is a '
u'community college for applied arts and technology in Toronto, Ontario. </p>',
{
u'2006': ['Amount: $50,000'],
u'2007': ['Amount: $100,000'],
u'2009': ['Amount: $80,910'],
u'2011': ['Amount: $96,950']
},
u'',
),
Grant(
u'leigh-school',
u'Leigh School',
u'New Zealand',
u'Leigh School',
u'learning-webmaking',
u'$2,500',
2009,
        u'<p>This grant supports the purchase of equipment and software for the '
        u'ICT components of courses at '
u'<a href="http://www.leigh.school.nz/">Leigh School</a>, a primary school in '
u'New Zealand dedicated to a broad curriculum that includes computers and technology.</p>',
u'',
u'',
),
Grant(
u'peer2peer-university',
u'Phillip Schmidt (P2PU)',
u'United States',
u'Peer2Peer University',
u'learning-webmaking',
u'$25,500',
2011,
u'<p>Mozilla issued a grant to Phillip Schmidt in 2009 '
u'(<a href="http://www.p2pu.org/">P2PU</a>) to enable the creation of '
u'an online course called <a href="https://wiki.mozilla.org/Education/EduCourse">'
u'Open|Web|Content|Education</a>, where educators learned about open content licensing, '
u'open web technologies and open teaching methods. In 2011, Mozilla provided a '
u'grant to P2PU to support <a href="https://p2pu.org/en/schools/school-of-webcraft/sets/'
u'webmaking-101/">Webmaking 101</a> and the <a href="https://p2pu.org/en/groups/schools/'
u'school-of-webcraft/">School of Webcraft</a> community coordination.</p><p>P2PU combines '
u'open educational resources, structured courses, and recognition of knowledge and '
u'learning to offer high-quality low-cost education opportunities. It is run and '
u'governed by volunteers.</p>',
{
u'2009': ['Open|Web|Content|Education: $2,500'],
u'2011': ['Webmaking 101 - Project Management & School of Webcraft - Community Coordination: $23,000']
},
u'',
),
Grant(
u'ushaidi-chile',
u'Ushahidi',
u'United States and Chile',
u'Ushahidi Chile',
u'free-culture-community',
u'$10,000',
2010,
u'<p>In a crisis environment, maintaining lines of communication is critically important. '
u'<a href="http://www.ushahidi.com/">Ushahidi</a> developed an open source platform that '
u'enables citizen reporting in crisis situations. A deadly earthquake struck Chile on '
u'February 27, 2010, cutting off many vulnerable people from traditional sources of '
u'information. Mozilla awarded a grant to enable Ushahidi volunteers to train Chilean '
u'civilians and government officials to utilize the Ushahidi platform during the relief '
u'effort.</p><p>See the <a href="reports/ushahidi-chile-report/">final grant report</a> '
u'for more details.</p>',
u'',
[
u'http://blog.ushahidi.com/index.php/2010/03/15/mozilla-foundation-supports-ushahidi-chile/',
]
),
Grant(
u'atlan',
u'Atlan Laboratories',
u'United States',
u'FIPS 140-2 Validation',
u'open-source-technology',
u'$25,000',
2008,
u'<p>This grant to Atlan Labs, along with funding from Red Hat and Sun Microsystems, '
u'supported FIPS 140-2 validation for the latest version of Network Security Services '
u'(NSS). Federal Information Processing Standards Publications (FIPS PUBS) '
u'140-1 and 140-2 are US government standards for implementations of cryptographic '
u'modules - that is, hardware or software that encrypts and decrypts data or '
        u'performs other cryptographic operations. Atlan Labs was a cybersecurity '
u'product testing firm based in McLean, Virginia that provided Federal Information '
u'Processing Standard (FIPS) 140-2 and 201 validations. Atlan was acquired by '
u'<a href="http://www.saic.com/infosec/testing-accreditation/">SAIC</a> in July 2009.</p>',
u'',
u'',
),
Grant(
u'automated-calendar-testing',
u'Merike Sell',
u'Estonia',
u'Calendar Automated Testing',
u'open-source-technology',
u'$4,500',
2009,
u'<p>This grant is funding the development of calendar automated testing for the '
u'Mozilla calendar code. This was originally an idea presented at the 2009 '
u'Google Summer of Code, and Mozilla Calendar developers became interested in '
u'funding technology that would enable automated testing. Merike Sell is an active '
u'member of the Mozilla developer and localization communites who live in Estonia.</p>',
u'',
u'',
),
Grant(
u'w3c-validator',
u'World Wide Web Consortium',
u'International',
u'W3C Validator',
u'open-source-technology',
u'$15,000',
2009,
u'<p>The Mozilla Foundation is a member of the <a href="http://www.w3.org/">'
u'World Wide Web Consortium</a>, and various Mozilla people represent Mozilla in '
u'W3C working groups and other W3C contexts. This grant was issued beyond Mozilla’s '
u'existing W3C membership dues, and funded work on '
u'<a href="http://jigsaw.w3.org/css-validator/">W3C CSS Validator</a> by giving to '
u'ERCIM, the W3C’s donation program.</p>',
u'',
u'',
),
Grant(
u'jambu',
u'Jambu',
u'United States',
u'Jambu',
u'open-source-technology',
u'$25,000',
2007,
u'<p><a href="www.oatsoft.org/Software/jambu">Jambu</a> is a pointer and switch '
u'project that improves accessibility for people with physical disabilities. '
u'This grant supported the improvement of switch access to Firefox on Windows, '
u'with the greater goal of providing transparent alternative input access to computers. '
u'Users served by this project may include adults who have experienced a debilitating '
        u'accident or stroke, people with congenital physical disabilities, children with '
u'multiple disabilities, and those with learning difficulties or limited education '
u'who often need to learn to use a switch through specialist educational programs.</p>',
{
u'2006': ['Phase 1: $15,000'],
u'2007': ['Phase 2: $10,000'],
},
u'',
),
Grant(
u'nu',
u'Northeastern University',
u'United States',
u'Graduate-level work of PhD students at Northeastern University',
u'open-source-technology',
u'$283,085',
2010,
u'<p>Since 2009 Mozilla has supported the graduate-level work of PhD students at '
u'<a href="http://www.ccs.neu.edu/">Northeastern University</a>, developing new tools '
u'for the standardization, streamlining, and testing of JavaScript. In 2009 Mozilla '
u'contributed $99,115 to the research efforts of '
u'<a href="http://www.ccs.neu.edu/home/samth/">Sam Tobin-Hochstadt</a>. In 2010 '
u'Mozilla made two gifts: one of $107,596 to further support Mr. Tobin-Hochstadt’s '
u'research and another gift of $76,374 to <a href="http://www.ccs.neu.edu/home/dimvar/">'
u'Demetrios Vardoulakis</a>.</p>',
{
u'2009': ['PhD Research of Sam Tobin-Hochstadt: $99,115'],
u'2010': ['PhD research of Sam Tobin-Hochstadt and Demetrios Vardoulakis: $107,596 and $76,374']
},
u'',
),
Grant(
u'owasp',
u'OWASP',
u'United States',
u'The Open Web Application Security Project',
u'open-source-technology',
u'$15,000',
2010,
u'<p>This grant supports the <a href="http://www.owasp.org/index.php/Main_Page">'
u'Open Web Application Security Project</a>, which focuses on improving the security '
u'of application software. OWASP\'s mission is to make application security visible, '
u'so that people and organizations can make informed decisions about true '
u'application security risks.</p>',
u'',
u'',
),
Grant(
u'webaim',
u'WebAIM',
u'United States',
u'WebAIM',
u'open-source-technology',
u'$15,000',
2006,
u'<p>In 2006, Mozilla provided a grant to <a href="http://webaim.org/">WebAIM</a>, '
u'an accessibility organization based at Utah State University, to develop XUL '
u'accessibility guidelines and an accompanying evaluation tool. WebAIM has provided '
u'comprehensive web accessibility solutions since 1999. These years of experience '
u'have made WebAIM one of the leading providers of web accessibility expertise '
u'internationally. WebAIM is a non-profit organization within the Center for '
u'Persons with Disabilities at Utah State University.</p>',
u'',
u'',
),
]
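# Illustrative sketch, not part of the original module: GRANTS is a flat list of Grant
# namedtuples, so callers can filter it with plain comprehensions, e.g.
#
#   open_source_grants = [g for g in GRANTS if g.type == 'open-source-technology']
#   titles_2010 = [g.title for g in GRANTS if g.year == 2010]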
| mpl-2.0 |
Phoenix-CJ23/stockkernel | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
	buf += "	 * when converting a NodeACL from demo mode -> explicit\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
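# tcm_mod_scan_fabric_ops() below scrapes the function-pointer members of
# struct target_core_fabric_ops from target_core_fabric.h so that stub
# definitions can be generated for each callback.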
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
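# tcm_mod_dump_fabric_ops() below writes <fabric_mod_name>_fabric.c plus the matching
# _fabric.h header, providing minimal stub implementations for every callback
# collected by tcm_mod_scan_fabric_ops().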
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
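# Example invocation (script and module names are illustrative; -p must be FC, SAS or iSCSI):
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI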
| gpl-2.0 |
mthz/rpg_svo | svo_analysis/src/svo_analysis/filter_groundtruth_smooth.py | 17 | 1875 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import transformations
from scipy import signal
save = True
data_filename = '/home/cforster/Datasets/SlamBenchmark/asl_vicon_d2/groundtruth.txt'
filtered_data_filename = '/home/cforster/Datasets/SlamBenchmark/asl_vicon_d2/groundtruth_filtered.txt'
file = open(data_filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
D = np.array([[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"], dtype=np.float64)
n = np.shape(D)[0]
rpy = np.empty([n,3])
for i in range(n):
quat = D[i,4:8]
rpy[i,:] = transformations.euler_from_quaternion(quat, axes='sxyz')
# filter rpy
f_sensor = 200.0; # sampling frequency in hz
f_cut = 15.0; # cutoff frequency in hz
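# scipy.signal.butter() expects the cutoff normalized to the Nyquist frequency
# (f_sensor/2), so 15 Hz with a 200 Hz sensor corresponds to 15/100 = 0.15 here.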
b,a = signal.butter(5,f_cut/(f_sensor/2));
print a
print b
rpy_filt = np.empty([n,3])
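# filtfilt applies the filter forward and backward, so the smoothed orientation
# has no phase lag relative to the raw ground truth.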
rpy_filt[:,0] = signal.filtfilt(b, a, rpy[:,0])
rpy_filt[:,1] = signal.filtfilt(b, a, rpy[:,1])
rpy_filt[:,2] = signal.filtfilt(b, a, rpy[:,2])
fig = plt.figure()
ax = fig.add_subplot(111, title='orientation filtered')
ax.plot(rpy[:,0], 'r-')
ax.plot(rpy[:,1], 'g-')
ax.plot(rpy[:,2], 'b-')
ax.plot(rpy_filt[:,0], 'k-', linewidth=2)
ax.plot(rpy_filt[:,1], 'k-', linewidth=2)
ax.plot(rpy_filt[:,2], 'k-', linewidth=2)
fig = plt.figure()
ax = fig.add_subplot(111, title='position')
ax.plot(D[:,1], 'r')
ax.plot(D[:,2], 'g')
ax.plot(D[:,3], 'b')
fig = plt.figure()
ax = fig.add_subplot(111, title='trajectory from top')
ax.plot(D[:,1], D[:,2])
if save:
f = open(filtered_data_filename,'w')
for i in range(np.shape(D)[0]):
quat = transformations.quaternion_from_euler(rpy_filt[i,0], rpy_filt[i,1], rpy_filt[i,2], axes='sxyz')
f.write('%.7f %.5f %.5f %.5f %.5f %.5f %.5f %.5f\n' % (D[i,0], D[i,1], D[i,2], D[i,3], quat[0], quat[1], quat[2], quat[3]))
f.close()
| gpl-3.0 |
quinox/weblate | weblate/trans/tests/test_changes.py | 2 | 3112 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <[email protected]>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for changes browsing.
"""
from weblate.trans.tests.test_views import ViewTestCase
from django.core.urlresolvers import reverse
class ChangesTest(ViewTestCase):
def test_basic(self):
response = self.client.get(reverse('changes'))
self.assertContains(response, 'Resource update')
def test_basic_csv_denied(self):
response = self.client.get(reverse('changes-csv'))
self.assertEquals(response.status_code, 403)
def test_basic_csv(self):
self.make_manager()
response = self.client.get(reverse('changes-csv'))
self.assertContains(response, 'timestamp,')
def test_filter(self):
response = self.client.get(
reverse('changes'),
{'project': 'test'}
)
self.assertContains(response, 'Resource update')
self.assertNotContains(response, 'Failed to find matching project!')
response = self.client.get(
reverse('changes'),
{'project': 'test', 'subproject': 'test'}
)
self.assertContains(response, 'Resource update')
self.assertNotContains(response, 'Failed to find matching project!')
response = self.client.get(
reverse('changes'),
{'project': 'test', 'subproject': 'test', 'lang': 'cs'}
)
self.assertContains(response, 'Resource update')
self.assertNotContains(response, 'Failed to find matching project!')
response = self.client.get(
reverse('changes'),
{'lang': 'cs'}
)
self.assertContains(response, 'Resource update')
self.assertNotContains(response, 'Failed to find matching language!')
response = self.client.get(
reverse('changes'),
{'project': 'testx', 'subproject': 'test', 'lang': 'cs'}
)
self.assertContains(response, 'Resource update')
self.assertContains(response, 'Failed to find matching project!')
def test_user(self):
self.edit_unit(
'Hello, world!\n',
'Nazdar svete!\n'
)
response = self.client.get(
reverse('changes'),
{'user': self.user.username}
)
self.assertContains(response, 'New translation')
self.assertNotContains(response, 'Invalid search string!')
| gpl-3.0 |
mikebenfield/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
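        # lay out roughly three directory entries per line to keep the error message readable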
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
yakky/cmsplugin-filer | cmsplugin_filer_video/settings.py | 40 | 1093 | from django.conf import settings
VIDEO_WIDTH = getattr(settings, "VIDEO_WIDTH", 320)
VIDEO_HEIGHT = getattr(settings, "VIDEO_HEIGHT", 240)
VIDEO_AUTOPLAY = getattr(settings, "VIDEO_AUTOPLAY", False)
VIDEO_AUTOHIDE = getattr(settings, "VIDEO_AUTOHIDE", False)
VIDEO_FULLSCREEN = getattr(settings, "VIDEO_FULLSCREEN", True)
VIDEO_LOOP = getattr(settings, "VIDEO_LOOP", False)
VIDEO_BG_COLOR = getattr(settings, "VIDEO_BG_COLOR", "000000")
VIDEO_TEXT_COLOR = getattr(settings, "VIDEO_TEXT_COLOR", "FFFFFF")
VIDEO_SEEKBAR_COLOR = getattr(settings, "VIDEO_SEEKBAR_COLOR", "13ABEC")
VIDEO_SEEKBARBG_COLOR = getattr(settings, "VIDEO_SEEKBARBG_COLOR", "333333")
VIDEO_LOADINGBAR_COLOR = getattr(settings, "VIDEO_LOADINGBAR_COLOR", "828282")
VIDEO_BUTTON_OUT_COLOR = getattr(settings, "VIDEO_BUTTON_OUT_COLOR", "333333")
VIDEO_BUTTON_OVER_COLOR = getattr(settings, "VIDEO_BUTTON_OVER_COLOR", "000000")
VIDEO_BUTTON_HIGHLIGHT_COLOR = getattr(settings, "VIDEO_BUTTON_HIGHLIGHT_COLOR", "FFFFFF")
VIDEO_PLUGIN_ENABLE_ADVANCED_SETTINGS = getattr(settings, "VIDEO_PLUGIN_ENABLE_ADVANCED_SETTINGS", True)
| bsd-3-clause |
iee/iee_fuse | smb/testdaemon.py | 1 | 1657 | #!/usr/bin/env python
# To kick off the script, run the following from the python directory:
# PYTHONPATH=`pwd` python testdaemon.py start
#standard python libs
import logging
import time
import stomp
#third party libs
from daemon import runner
class SampleListener(object):
def on_message(self, headers, msg):
logger.info(msg)
class App():
def __init__(self):
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/null'
self.stderr_path = '/dev/null'
self.pidfile_path = '/var/run/testdaemon/testdaemon.pid'
self.pidfile_timeout = 5
def run(self):
conn = stomp.Connection10()
conn.set_listener('SampleListener', SampleListener())
conn.start()
conn.connect()
conn.subscribe('SampleQueue')
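        # Assumes an older stomp.py API where start(), connect() and
        # subscribe(destination) are valid without extra arguments; newer
        # releases changed these signatures.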
while True:
#Main code goes here ...
#Note that logger level needs to be set to logging.DEBUG before this shows up in the logs
logger.debug("Debug message")
logger.info("Info message")
logger.warn("Warning message")
logger.error("Error message")
time.sleep(10)
app = App()
logger = logging.getLogger("DaemonLog")
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
handler = logging.FileHandler("/var/log/testdaemon/testdaemon.log")
handler.setFormatter(formatter)
logger.addHandler(handler)
daemon_runner = runner.DaemonRunner(app)
#This ensures that the logger file handle does not get closed during daemonization
daemon_runner.daemon_context.files_preserve=[handler.stream]
daemon_runner.do_action()
| gpl-3.0 |
rue89-tech/edx-analytics-pipeline | edx/analytics/tasks/tests/test_overall_events.py | 2 | 4614 | """Tests overall count of events"""
import sys
import json
from edx.analytics.tasks.tests.map_reduce_mixins import MapperTestMixin, ReducerTestMixin
import luigi
from edx.analytics.tasks.tests import unittest
from edx.analytics.tasks.overall_events import TotalEventsDailyTask
from edx.analytics.tasks.tests.opaque_key_mixins import InitializeOpaqueKeysMixin
from StringIO import StringIO
class TotalEventsTaskMapTest(InitializeOpaqueKeysMixin, MapperTestMixin, unittest.TestCase):
"""Ensure events of various flavors are counted"""
DATE = '2013-12-17'
def setUp(self):
self.task_class = TotalEventsDailyTask
super(TotalEventsTaskMapTest, self).setUp()
self.initialize_ids()
self.task.init_local()
self.event_type = "edx.course.enrollment.activated"
self.timestamp = "{}T15:38:32.805444".format(self.DATE)
self.user_id = 10
self.event_templates = {
'event': {
"username": "test_user",
"host": "test_host",
"session": "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb",
"event_source": "server",
"name": self.event_type,
"event_type": self.event_type,
"context": {
"course_id": self.course_id,
"org_id": self.org_id,
"user_id": self.user_id,
},
"time": "{0}+00:00".format(self.timestamp),
"ip": "127.0.0.1",
"event": {
"course_id": self.course_id,
"user_id": self.user_id,
"mode": "honor",
},
"agent": "blah, blah, blah",
"page": None
}
}
self.default_event_template = 'event'
def test_explicit_event(self):
line = self.create_event_log_line(user_id="")
self.assert_single_map_output(line, self.DATE, 1)
def test_no_timestamp(self):
line = self.create_event_log_line(timestamp="")
self.assert_single_map_output(line, self.DATE, 1)
def test_bad_event(self):
line = "bad event"
self.assert_no_map_output_for(line)
def test_event_no_ids(self):
"""
Many events (generally implicit events) have typical edx IDs missing or unavailable
because of their contexts. This test ensures these events are still counted.
"""
self.empty_ids()
line = self.create_event_log_line()
self.assert_single_map_output(line, self.DATE, 1)
def test_implicit_event(self):
event = {
"username": "",
"host": "precise64",
"event_source": "server",
"event_type": "/jsi18n/",
"context": {
"user_id": "",
"org_id": "",
"course_id": "",
"path": "/jsi18n/"
},
"time": "{}T22:11:29.689805+00:00".format(self.DATE),
"ip": "10.0.2.2",
"event": "{\"POST\": {}, \"GET\": {}}",
"agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/39.0.2171.71 Safari/537.36",
"page": "null"
}
line = json.dumps(event)
self.assert_single_map_output(line, self.DATE, 1)
def test_no_time_element(self):
event_line = self.create_event_dict()
del event_line["time"]
line = json.dumps(event_line)
# When the time element is missing, luigi will print an error to stderr.
# Capture stderr and assert it is what we expect. Also assert that we do not
# count the event.
test_stderr = StringIO()
sys.stderr = test_stderr
self.assert_no_map_output_for(line)
test_stderr = test_stderr.getvalue().strip()
self.assertEquals(test_stderr, 'reporter:counter:Event,Missing Time Field,1')
class TotalEventsTaskReducerTest(ReducerTestMixin, unittest.TestCase):
"""Ensure counts are aggregated"""
def setUp(self):
self.task_class = TotalEventsDailyTask
super(TotalEventsTaskReducerTest, self).setUp()
self.reduce_key = '2013-12-17T00:00:01'
def test_one_event_count(self):
inputs = [1, ]
expected = ((self.reduce_key, 1), )
self._check_output_complete_tuple(inputs, expected)
def test_multiple_events_same_day(self):
inputs = [1, 1]
expected = ((self.reduce_key, 2), )
self._check_output_complete_tuple(inputs, expected)
| agpl-3.0 |
mirror/cygwin | gdb/python/lib/gdb/command/frame_filters.py | 8 | 16600 | # Frame-filter commands.
# Copyright (C) 2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""GDB commands for working with frame-filters."""
import sys
import gdb
import copy
from gdb.FrameIterator import FrameIterator
from gdb.FrameDecorator import FrameDecorator
import gdb.frames
import itertools
# GDB Commands.
class SetFilterPrefixCmd(gdb.Command):
"""Prefix command for 'set' frame-filter related operations."""
def __init__(self):
super(SetFilterPrefixCmd, self).__init__("set frame-filter",
gdb.COMMAND_OBSCURE,
gdb.COMPLETE_NONE, True)
class ShowFilterPrefixCmd(gdb.Command):
"""Prefix command for 'show' frame-filter related operations."""
def __init__(self):
super(ShowFilterPrefixCmd, self).__init__("show frame-filter",
gdb.COMMAND_OBSCURE,
gdb.COMPLETE_NONE, True)
class InfoFrameFilter(gdb.Command):
"""List all registered Python frame-filters.
    Usage: info frame-filter
"""
def __init__(self):
super(InfoFrameFilter, self).__init__("info frame-filter",
gdb.COMMAND_DATA)
@staticmethod
def enabled_string(state):
"""Return "Yes" if filter is enabled, otherwise "No"."""
if state:
return "Yes"
else:
return "No"
def list_frame_filters(self, frame_filters):
""" Internal worker function to list and print frame filters
in a dictionary.
Arguments:
frame_filters: The name of the dictionary, as
specified by GDB user commands.
"""
sorted_frame_filters = sorted(frame_filters.items(),
key=lambda i: gdb.frames.get_priority(i[1]),
reverse=True)
if len(sorted_frame_filters) == 0:
print(" No frame filters registered.")
else:
print(" Priority Enabled Name")
for frame_filter in sorted_frame_filters:
name = frame_filter[0]
try:
priority = '{:<8}'.format(
str(gdb.frames.get_priority(frame_filter[1])))
enabled = '{:<7}'.format(
self.enabled_string(gdb.frames.get_enabled(frame_filter[1])))
except Exception:
e = sys.exc_info()[1]
print(" Error printing filter '"+name+"': "+str(e))
else:
print(" %s %s %s" % (priority, enabled, name))
def print_list(self, title, filter_list, blank_line):
print(title)
self.list_frame_filters(filter_list)
if blank_line:
print("")
def invoke(self, arg, from_tty):
self.print_list("global frame-filters:", gdb.frame_filters, True)
cp = gdb.current_progspace()
self.print_list("progspace %s frame-filters:" % cp.filename,
cp.frame_filters, True)
for objfile in gdb.objfiles():
self.print_list("objfile %s frame-filters:" % objfile.filename,
objfile.frame_filters, False)
# Internal enable/disable functions.
def _enable_parse_arg(cmd_name, arg):
""" Internal worker function to take an argument from
enable/disable and return a tuple of arguments.
Arguments:
cmd_name: Name of the command invoking this function.
args: The argument as a string.
Returns:
A tuple containing the dictionary, and the argument, or just
the dictionary in the case of "all".
"""
argv = gdb.string_to_argv(arg);
argc = len(argv)
if argv[0] == "all" and argc > 1:
raise gdb.GdbError(cmd_name + ": with 'all' " \
"you may not specify a filter.")
else:
if argv[0] != "all" and argc != 2:
raise gdb.GdbError(cmd_name + " takes exactly two arguments.")
return argv
def _do_enable_frame_filter(command_tuple, flag):
"""Worker for enabling/disabling frame_filters.
Arguments:
command_type: A tuple with the first element being the
frame filter dictionary, and the second being
the frame filter name.
flag: True for Enable, False for Disable.
"""
list_op = command_tuple[0]
op_list = gdb.frames.return_list(list_op)
if list_op == "all":
for item in op_list:
gdb.frames.set_enabled(item, flag)
else:
frame_filter = command_tuple[1]
try:
ff = op_list[frame_filter]
except KeyError:
msg = "frame-filter '" + str(name) + "' not found."
raise gdb.GdbError(msg)
gdb.frames.set_enabled(ff, flag)
def _complete_frame_filter_list(text, word, all_flag):
"""Worker for frame filter dictionary name completion.
Arguments:
text: The full text of the command line.
word: The most recent word of the command line.
all_flag: Whether to include the word "all" in completion.
Returns:
A list of suggested frame filter dictionary name completions
from text/word analysis. This list can be empty when there
are no suggestions for completion.
"""
if all_flag == True:
filter_locations = ["all", "global", "progspace"]
else:
filter_locations = ["global", "progspace"]
for objfile in gdb.objfiles():
filter_locations.append(objfile.filename)
# If the user just asked for completions with no completion
# hints, just return all the frame filter dictionaries we know
# about.
if (text == ""):
return filter_locations
# Otherwise filter on what we know.
flist = filter(lambda x,y=text:x.startswith(y), filter_locations)
# If we only have one completion, complete it and return it.
if len(flist) == 1:
flist[0] = flist[0][len(text)-len(word):]
# Otherwise, return an empty list, or a list of frame filter
# dictionaries that the previous filter operation returned.
return flist
def _complete_frame_filter_name(word, printer_dict):
"""Worker for frame filter name completion.
Arguments:
word: The most recent word of the command line.
printer_dict: The frame filter dictionary to search for frame
filter name completions.
Returns: A list of suggested frame filter name completions
from word analysis of the frame filter dictionary. This list
can be empty when there are no suggestions for completion.
"""
printer_keys = printer_dict.keys()
if (word == ""):
return printer_keys
flist = filter(lambda x,y=word:x.startswith(y), printer_keys)
return flist
class EnableFrameFilter(gdb.Command):
"""GDB command to disable the specified frame-filter.
Usage: enable frame-filter enable DICTIONARY [NAME]
DICTIONARY is the name of the frame filter dictionary on which to
operate. If dictionary is set to "all", perform operations on all
dictionaries. Named dictionaries are: "global" for the global
frame filter dictionary, "progspace" for the program space's frame
filter dictionary. If either all, or the two named dictionaries
are not specified, the dictionary name is assumed to be the name
of the object-file name.
NAME matches the name of the frame-filter to operate on. If
DICTIONARY is "all", NAME is ignored.
"""
def __init__(self):
super(EnableFrameFilter, self).__init__("enable frame-filter",
gdb.COMMAND_DATA)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, True)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = _enable_parse_arg("enable frame-filter", arg)
_do_enable_frame_filter(command_tuple, True)
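# Example session (dictionary and filter names are illustrative):
#   (gdb) enable frame-filter global MyFrameFilter
#   (gdb) enable frame-filter all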
class DisableFrameFilter(gdb.Command):
"""GDB command to disable the specified frame-filter.
    Usage: disable frame-filter DICTIONARY [NAME]
DICTIONARY is the name of the frame filter dictionary on which to
operate. If dictionary is set to "all", perform operations on all
dictionaries. Named dictionaries are: "global" for the global
frame filter dictionary, "progspace" for the program space's frame
filter dictionary. If either all, or the two named dictionaries
are not specified, the dictionary name is assumed to be the name
of the object-file name.
NAME matches the name of the frame-filter to operate on. If
DICTIONARY is "all", NAME is ignored.
"""
def __init__(self):
super(DisableFrameFilter, self).__init__("disable frame-filter",
gdb.COMMAND_DATA)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, True)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = _enable_parse_arg("disable frame-filter", arg)
_do_enable_frame_filter(command_tuple, False)
class SetFrameFilterPriority(gdb.Command):
"""GDB command to set the priority of the specified frame-filter.
Usage: set frame-filter priority DICTIONARY NAME PRIORITY
DICTIONARY is the name of the frame filter dictionary on which to
operate. Named dictionaries are: "global" for the global frame
filter dictionary, "progspace" for the program space's framefilter
dictionary. If either of these two are not specified, the
dictionary name is assumed to be the name of the object-file name.
NAME matches the name of the frame filter to operate on.
    PRIORITY is an integer specifying the new priority to assign to the frame
filter.
"""
def __init__(self):
super(SetFrameFilterPriority, self).__init__("set frame-filter " \
"priority",
gdb.COMMAND_DATA)
def _parse_pri_arg(self, arg):
"""Internal worker to parse a priority from a tuple.
Arguments:
arg: Tuple which contains the arguments from the command.
Returns:
A tuple containing the dictionary, name and priority from
the arguments.
Raises:
gdb.GdbError: An error parsing the arguments.
"""
argv = gdb.string_to_argv(arg);
argc = len(argv)
if argc != 3:
print("set frame-filter priority " \
"takes exactly three arguments.")
return None
return argv
def _set_filter_priority(self, command_tuple):
"""Internal worker for setting priority of frame-filters, by
parsing a tuple and calling _set_priority with the parsed
tuple.
Arguments:
command_tuple: Tuple which contains the arguments from the
command.
"""
list_op = command_tuple[0]
frame_filter = command_tuple[1]
# GDB returns arguments as a string, so convert priority to
# a number.
priority = int(command_tuple[2])
op_list = gdb.frames.return_list(list_op)
try:
ff = op_list[frame_filter]
except KeyError:
msg = "frame-filter '" + str(name) + "' not found."
raise gdb.GdbError(msg)
gdb.frames.set_priority(ff, priority)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, False)
else:
printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = self._parse_pri_arg(arg)
if command_tuple != None:
self._set_filter_priority(command_tuple)
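# Example (names are illustrative): (gdb) set frame-filter priority global MyFrameFilter 100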
class ShowFrameFilterPriority(gdb.Command):
"""GDB command to show the priority of the specified frame-filter.
Usage: show frame-filter priority DICTIONARY NAME
DICTIONARY is the name of the frame filter dictionary on which to
operate. Named dictionaries are: "global" for the global frame
filter dictionary, "progspace" for the program space's framefilter
dictionary. If either of these two are not specified, the
dictionary name is assumed to be the name of the object-file name.
NAME matches the name of the frame-filter to operate on.
"""
def __init__(self):
super(ShowFrameFilterPriority, self).__init__("show frame-filter " \
"priority",
gdb.COMMAND_DATA)
def _parse_pri_arg(self, arg):
"""Internal worker to parse a dictionary and name from a
tuple.
Arguments:
arg: Tuple which contains the arguments from the command.
Returns:
A tuple containing the dictionary, and frame filter name.
Raises:
gdb.GdbError: An error parsing the arguments.
"""
argv = gdb.string_to_argv(arg);
argc = len(argv)
if argc != 2:
print("show frame-filter priority " \
"takes exactly two arguments.")
return None
return argv
def get_filter_priority(self, frame_filters, name):
"""Worker for retrieving the priority of frame_filters.
Arguments:
frame_filters: Name of frame filter dictionary.
name: object to select printers.
Returns:
The priority of the frame filter.
Raises:
gdb.GdbError: A frame filter cannot be found.
"""
op_list = gdb.frames.return_list(frame_filters)
try:
ff = op_list[name]
except KeyError:
msg = "frame-filter '" + str(name) + "' not found."
raise gdb.GdbError(msg)
return gdb.frames.get_priority(ff)
def complete(self, text, word):
"""Completion function for both frame filter dictionary, and
frame filter name."""
if text.count(" ") == 0:
return _complete_frame_filter_list(text, word, False)
else:
            printer_list = gdb.frames.return_list(text.split()[0].rstrip())
return _complete_frame_filter_name(word, printer_list)
def invoke(self, arg, from_tty):
command_tuple = self._parse_pri_arg(arg)
        if command_tuple is None:
return
filter_name = command_tuple[1]
list_name = command_tuple[0]
try:
            priority = self.get_filter_priority(list_name, filter_name)
except Exception:
e = sys.exc_info()[1]
print("Error printing filter priority for '"+name+"':"+str(e))
else:
print("Priority of filter '" + filter_name + "' in list '" \
+ list_name + "' is: " + str(priority))
# Register commands
SetFilterPrefixCmd()
ShowFilterPrefixCmd()
InfoFrameFilter()
EnableFrameFilter()
DisableFrameFilter()
SetFrameFilterPriority()
ShowFrameFilterPriority()
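# Illustrative sketch (not part of the original module): once the commands above
# are registered, priorities can be adjusted and inspected from a GDB session
# roughly like this -- the filter name "Reverse" is only a hypothetical example:
#
#   (gdb) set frame-filter priority global Reverse 50
#   (gdb) show frame-filter priority global Reverse
#   Priority of filter 'Reverse' in list 'global' is: 50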
| gpl-2.0 |
cognitiveclass/edx-platform | common/lib/xmodule/xmodule/word_cloud_module.py | 42 | 8097 | """Word cloud is an ungraded XBlock used by students to
generate and view a word cloud.
On the client side we show:
If the student has not yet answered - `num_inputs` text inputs.
If the student has answered - the words they entered and the cloud.
"""
import json
import logging
from pkg_resources import resource_string
from xmodule.raw_module import EmptyDataRawDescriptor
from xmodule.editing_module import MetadataOnlyEditingDescriptor
from xmodule.x_module import XModule
from xblock.fields import Scope, Dict, Boolean, List, Integer, String
log = logging.getLogger(__name__)
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
def pretty_bool(value):
"""Check value for possible `True` value.
    Using this function we can manage different types of Boolean values
in xml files.
"""
bool_dict = [True, "True", "true", "T", "t", "1"]
return value in bool_dict
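# Rough usage sketch (illustrative, not part of the original module): attribute
# values read from XML arrive as strings, which is why pretty_bool accepts the
# string spellings as well as the real boolean:
#
#   pretty_bool("true") # -> True
#   pretty_bool("1") # -> True
#   pretty_bool("no") # -> False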
class WordCloudFields(object):
"""XFields for word cloud."""
display_name = String(
display_name=_("Display Name"),
help=_("Display name for this module"),
scope=Scope.settings,
default="Word cloud"
)
num_inputs = Integer(
display_name=_("Inputs"),
help=_("Number of text boxes available for students to input words/sentences."),
scope=Scope.settings,
default=5,
values={"min": 1}
)
num_top_words = Integer(
display_name=_("Maximum Words"),
help=_("Maximum number of words to be displayed in generated word cloud."),
scope=Scope.settings,
default=250,
values={"min": 1}
)
display_student_percents = Boolean(
display_name=_("Show Percents"),
help=_("Statistics are shown for entered words near that word."),
scope=Scope.settings,
default=True
)
# Fields for descriptor.
submitted = Boolean(
help=_("Whether this student has posted words to the cloud."),
scope=Scope.user_state,
default=False
)
student_words = List(
help=_("Student answer."),
scope=Scope.user_state,
default=[]
)
all_words = Dict(
help=_("All possible words from all students."),
scope=Scope.user_state_summary
)
top_words = Dict(
help=_("Top num_top_words words for word cloud."),
scope=Scope.user_state_summary
)
class WordCloudModule(WordCloudFields, XModule):
"""WordCloud Xmodule"""
js = {
'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee')],
'js': [
resource_string(__name__, 'js/src/word_cloud/d3.min.js'),
resource_string(__name__, 'js/src/word_cloud/d3.layout.cloud.js'),
resource_string(__name__, 'js/src/word_cloud/word_cloud.js'),
resource_string(__name__, 'js/src/word_cloud/word_cloud_main.js'),
],
}
css = {'scss': [resource_string(__name__, 'css/word_cloud/display.scss')]}
js_module_name = "WordCloud"
def get_state(self):
"""Return success json answer for client."""
if self.submitted:
total_count = sum(self.all_words.itervalues())
return json.dumps({
'status': 'success',
'submitted': True,
'display_student_percents': pretty_bool(
self.display_student_percents
),
'student_words': {
word: self.all_words[word] for word in self.student_words
},
'total_count': total_count,
'top_words': self.prepare_words(self.top_words, total_count)
})
else:
return json.dumps({
'status': 'success',
'submitted': False,
'display_student_percents': False,
'student_words': {},
'total_count': 0,
'top_words': {}
})
def good_word(self, word):
"""Convert raw word to suitable word."""
return word.strip().lower()
def prepare_words(self, top_words, total_count):
"""Convert words dictionary for client API.
:param top_words: Top words dictionary
:type top_words: dict
:param total_count: Total number of words
:type total_count: int
        :rtype: list of dicts. Every dict has 3 keys: text - the actual word,
            size - the word's count, percent - its percent within the top_words dataset.
        Calculates corrected percents for every top word:
        For every word except the last, the percent is rounded.
        For the last word it is 100 minus the sum of all other percents.
"""
list_to_return = []
percents = 0
for num, word_tuple in enumerate(top_words.iteritems()):
if num == len(top_words) - 1:
percent = 100 - percents
else:
percent = round(100.0 * word_tuple[1] / total_count)
percents += percent
list_to_return.append(
{
'text': word_tuple[0],
'size': word_tuple[1],
'percent': percent
}
)
return list_to_return
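    # Illustrative sketch (assumed example data, not part of the original
    # module): with top_words = {'a': 1, 'b': 1, 'c': 1} and total_count = 3,
    # two of the words get round(100.0 / 3) == 33.0 percent and the remaining
    # one (whichever the dict yields last) gets 100 - 66.0 == 34.0, so the
    # displayed percents always sum to exactly 100:
    #
    #   self.prepare_words({'a': 1, 'b': 1, 'c': 1}, 3)
    #   # -> [{'text': 'a', 'size': 1, 'percent': 33.0},
    #   #     {'text': 'b', 'size': 1, 'percent': 33.0},
    #   #     {'text': 'c', 'size': 1, 'percent': 34.0}]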
def top_dict(self, dict_obj, amount):
"""Return top words from all words, filtered by number of
        occurrences
:param dict_obj: all words
:type dict_obj: dict
:param amount: number of words to be in top dict
:type amount: int
:rtype: dict
"""
return dict(
sorted(
dict_obj.items(),
key=lambda x: x[1],
reverse=True
)[:amount]
)
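    # Quick sketch of the selection behaviour (assumed example data, not part
    # of the original module):
    #
    #   self.top_dict({'cat': 5, 'dog': 2, 'fox': 9}, 2) # -> {'fox': 9, 'cat': 5}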
def handle_ajax(self, dispatch, data):
"""Ajax handler.
Args:
dispatch: string request slug
data: dict request get parameters
Returns:
json string
"""
if dispatch == 'submit':
if self.submitted:
return json.dumps({
'status': 'fail',
'error': 'You have already posted your data.'
})
# Student words from client.
            # FIXME: we must use raw JSON, not post data (multipart/form-data)
raw_student_words = data.getall('student_words[]')
student_words = filter(None, map(self.good_word, raw_student_words))
self.student_words = student_words
# FIXME: fix this, when xblock will support mutable types.
# Now we use this hack.
# speed issues
temp_all_words = self.all_words
self.submitted = True
# Save in all_words.
for word in self.student_words:
temp_all_words[word] = temp_all_words.get(word, 0) + 1
# Update top_words.
self.top_words = self.top_dict(
temp_all_words,
self.num_top_words
)
# Save all_words in database.
self.all_words = temp_all_words
return self.get_state()
elif dispatch == 'get_state':
return self.get_state()
else:
return json.dumps({
'status': 'fail',
'error': 'Unknown Command!'
})
def get_html(self):
"""Template rendering."""
context = {
'element_id': self.location.html_id(),
'element_class': self.location.category,
'ajax_url': self.system.ajax_url,
'num_inputs': self.num_inputs,
'submitted': self.submitted
}
self.content = self.system.render_template('word_cloud.html', context)
return self.content
class WordCloudDescriptor(WordCloudFields, MetadataOnlyEditingDescriptor, EmptyDataRawDescriptor):
"""Descriptor for WordCloud Xmodule."""
module_class = WordCloudModule
template_dir_name = 'word_cloud'
| agpl-3.0 |
geekboxzone/lollipop_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/layout_tests/port/builders.py | 25 | 5188 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from webkitpy.common.memoized import memoized
# In this dictionary, each item stores:
# * port_name -- a fully qualified port name
# * rebaseline_override_dir -- (optional) directory to put baselines in instead of where you would normally put them.
# This is useful when we don't have bots that cover particular configurations; so, e.g., you might
# support mac-mountainlion but not have a mac-mountainlion bot yet, so you'd want to put the mac-lion
# results into platform/mac temporarily.
# * specifiers -- TestExpectation specifiers for that config. Valid values are found in
# TestExpectationsParser._configuration_tokens_list
_exact_matches = {
"WebKit XP": {"port_name": "win-xp", "specifiers": ['XP', 'Release']},
"WebKit Win7": {"port_name": "win-win7", "specifiers": ['Win7', 'Release']},
"WebKit Win7 (dbg)": {"port_name": "win-win7", "specifiers": ['Win7', 'Debug']},
"WebKit Linux": {"port_name": "linux-x86_64", "specifiers": ['Linux', 'Release']},
"WebKit Linux 32": {"port_name": "linux-x86", "specifiers": ['Linux', 'Release']},
"WebKit Linux (dbg)": {"port_name": "linux-x86_64", "specifiers": ['Linux', 'Debug']},
"WebKit Mac10.6": {"port_name": "mac-snowleopard", "specifiers": ['SnowLeopard', 'Release']},
"WebKit Mac10.6 (dbg)": {"port_name": "mac-snowleopard", "specifiers": ['SnowLeopard', 'Debug']},
"WebKit Mac10.7": {"port_name": "mac-lion", "specifiers": ['Lion', 'Release']},
"WebKit Mac10.7 (dbg)": {"port_name": "mac-lion", "specifiers": ['Lion', 'Debug']},
"WebKit Mac10.8": {"port_name": "mac-mountainlion", "specifiers": ['MountainLion', 'Release']},
"WebKit Mac10.8 (retina)": {"port_name": "mac-retina", "specifiers": ['Retina', 'Release']},
"WebKit Mac10.9": {"port_name": "mac-mavericks", "specifiers": ['Mavericks', 'Release']},
"WebKit Android (Nexus4)": {"port_name": "android", "specifiers": ['Android', 'Release']},
}
# Mapping from port name to the deps builder of the same os:
_deps_builders = {
"linux-x86": "WebKit Linux (deps)",
"linux-x86_64": "WebKit Linux (deps)",
"win-xp": "WebKit XP (deps)",
"win-win7": "WebKit XP (deps)",
"mac-snowleopard": "WebKit Mac10.6 (deps)",
"mac-lion": "WebKit Mac10.6 (deps)",
"mac-mountainlion": "WebKit Mac10.6 (deps)",
"mac-mavericks": "WebKit Mac10.6 (deps)",
"mac-retina": "WebKit Mac10.6 (deps)",
}
_ports_without_builders = [
]
def builder_path_from_name(builder_name):
return re.sub(r'[\s().]', '_', builder_name)
def all_builder_names():
return sorted(set(_exact_matches.keys()))
def all_port_names():
return sorted(set(map(lambda x: x["port_name"], _exact_matches.values()) + _ports_without_builders))
def rebaseline_override_dir(builder_name):
return _exact_matches[builder_name].get("rebaseline_override_dir", None)
def port_name_for_builder_name(builder_name):
return _exact_matches[builder_name]["port_name"]
def specifiers_for_builder(builder_name):
return _exact_matches[builder_name]["specifiers"]
def builder_name_for_port_name(target_port_name):
debug_builder_name = None
for builder_name, builder_info in _exact_matches.items():
if builder_info['port_name'] == target_port_name:
if 'dbg' in builder_name:
debug_builder_name = builder_name
else:
return builder_name
return debug_builder_name
def builder_path_for_port_name(port_name):
    return builder_path_from_name(builder_name_for_port_name(port_name))
def deps_builder_name_for_port_name(target_port_name):
return _deps_builders.get(target_port_name, None)
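# Illustrative sketch (not part of the original file) of how the helpers above
# map between buildbot names and port names:
#
#   port_name_for_builder_name("WebKit Linux") # -> "linux-x86_64"
#   specifiers_for_builder("WebKit Win7 (dbg)") # -> ['Win7', 'Debug']
#   builder_path_from_name("WebKit Mac10.8 (retina)") # -> "WebKit_Mac10_8__retina_"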
| bsd-3-clause |
terbolous/SickRage | sickbeard/notifiers/nmjv2.py | 3 | 8204 | # coding=utf-8
# Author: Jasper Lanting
# Based on nmj.py by Nico Berlee: http://nico.berlee.nl/
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
from xml.dom.minidom import parseString
import sickbeard
import time
from sickbeard import logger
from six.moves import urllib
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
class Notifier(object):
def notify_snatch(self, ep_name): # pylint: disable=unused-argument
return False
    # Not implemented: starting the scanner on snatch does not make any sense
def notify_download(self, ep_name): # pylint: disable=unused-argument
self._notifyNMJ()
def notify_subtitle_download(self, ep_name, lang): # pylint: disable=unused-argument
self._notifyNMJ()
def notify_git_update(self, new_version): # pylint: disable=unused-argument
return False
# Not implemented, no reason to start scanner.
def notify_login(self, ipaddress=""): # pylint: disable=unused-argument
return False
def test_notify(self, host):
return self._sendNMJ(host)
def notify_settings(self, host, dbloc, instance):
"""
Retrieves the NMJv2 database location from Popcorn hour
host: The hostname/IP of the Popcorn Hour server
dbloc: 'local' for PCH internal hard drive. 'network' for PCH network shares
instance: Allows for selection of different DB in case of multiple databases
Returns: True if the settings were retrieved successfully, False otherwise
"""
try:
url_loc = "http://{0}:8008/file_operation?arg0=list_user_storage_file&arg1=&arg2={1}&arg3=20&arg4=true&arg5=true&arg6=true&arg7=all&arg8=name_asc&arg9=false&arg10=false".format(host, instance)
req = urllib.request.Request(url_loc)
handle1 = urllib.request.urlopen(req)
response1 = handle1.read()
xml = parseString(response1)
time.sleep(300.0 / 1000.0)
for node in xml.getElementsByTagName('path'):
xmlTag = node.toxml()
xmlData = xmlTag.replace('<path>', '').replace('</path>', '').replace('[=]', '')
url_db = "http://" + host + ":8008/metadata_database?arg0=check_database&arg1=" + xmlData
reqdb = urllib.request.Request(url_db)
handledb = urllib.request.urlopen(reqdb)
responsedb = handledb.read()
xmldb = parseString(responsedb)
returnvalue = xmldb.getElementsByTagName('returnValue')[0].toxml().replace('<returnValue>', '').replace(
'</returnValue>', '')
if returnvalue == "0":
DB_path = xmldb.getElementsByTagName('database_path')[0].toxml().replace(
'<database_path>', '').replace('</database_path>', '').replace('[=]', '')
if dbloc == "local" and DB_path.find("localhost") > -1:
sickbeard.NMJv2_HOST = host
sickbeard.NMJv2_DATABASE = DB_path
return True
if dbloc == "network" and DB_path.find("://") > -1:
sickbeard.NMJv2_HOST = host
sickbeard.NMJv2_DATABASE = DB_path
return True
except IOError as e:
logger.log("Warning: Couldn't contact popcorn hour on host {0}: {1}".format(host, e), logger.WARNING)
return False
return False
def _sendNMJ(self, host):
"""
Sends a NMJ update command to the specified machine
host: The hostname/IP to send the request to (no port)
database: The database to send the request to
mount: The mount URL to use (optional)
Returns: True if the request succeeded, False otherwise
"""
# if a host is provided then attempt to open a handle to that URL
try:
url_scandir = "http://" + host + ":8008/metadata_database?arg0=update_scandir&arg1=" + sickbeard.NMJv2_DATABASE + "&arg2=&arg3=update_all"
logger.log("NMJ scan update command sent to host: {0}".format(host), logger.DEBUG)
url_updatedb = "http://" + host + ":8008/metadata_database?arg0=scanner_start&arg1=" + sickbeard.NMJv2_DATABASE + "&arg2=background&arg3="
logger.log("Try to mount network drive via url: {0}".format(host), logger.DEBUG)
prereq = urllib.request.Request(url_scandir)
req = urllib.request.Request(url_updatedb)
handle1 = urllib.request.urlopen(prereq)
response1 = handle1.read()
time.sleep(300.0 / 1000.0)
handle2 = urllib.request.urlopen(req)
response2 = handle2.read()
except IOError as e:
logger.log("Warning: Couldn't contact popcorn hour on host {0}: {1}".format(host, e), logger.WARNING)
return False
try:
et = etree.fromstring(response1)
result1 = et.findtext("returnValue")
except SyntaxError as e:
logger.log("Unable to parse XML returned from the Popcorn Hour: update_scandir, {0}".format(e), logger.ERROR)
return False
try:
et = etree.fromstring(response2)
result2 = et.findtext("returnValue")
except SyntaxError as e:
logger.log("Unable to parse XML returned from the Popcorn Hour: scanner_start, {0}".format(e), logger.ERROR)
return False
# if the result was a number then consider that an error
error_codes = ["8", "11", "22", "49", "50", "51", "60"]
error_messages = ["Invalid parameter(s)/argument(s)",
"Invalid database path",
"Insufficient size",
"Database write error",
"Database read error",
"Open fifo pipe failed",
"Read only file system"]
if int(result1) > 0:
index = error_codes.index(result1)
logger.log("Popcorn Hour returned an error: {0}".format((error_messages[index])), logger.ERROR)
return False
else:
if int(result2) > 0:
index = error_codes.index(result2)
logger.log("Popcorn Hour returned an error: {0}".format((error_messages[index])), logger.ERROR)
return False
else:
logger.log("NMJv2 started background scan", logger.INFO)
return True
def _notifyNMJ(self, host=None, force=False):
"""
Sends a NMJ update command based on the SB config settings
host: The host to send the command to (optional, defaults to the host in the config)
database: The database to use (optional, defaults to the database in the config)
mount: The mount URL (optional, defaults to the mount URL in the config)
force: If True then the notification will be sent even if NMJ is disabled in the config
"""
if not sickbeard.USE_NMJv2 and not force:
logger.log("Notification for NMJ scan update not enabled, skipping this notification", logger.DEBUG)
return False
# fill in omitted parameters
if not host:
host = sickbeard.NMJv2_HOST
logger.log("Sending scan command for NMJ ", logger.DEBUG)
return self._sendNMJ(host)
| gpl-3.0 |
3L3N4/volatility | volatility/conf.py | 57 | 15263 | ## This file was taken from PyFlag http://www.pyflag.net/
# Michael Cohen <[email protected]>
# David Collett <[email protected]>
#
# ******************************************************
# Version: FLAG $Version: 0.87-pre1 Date: Thu Jun 12 00:48:38 EST 2008$
# ******************************************************
#
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ******************************************************
#pylint: disable-msg=C0111
""" Configuration modules for pyflag.
PyFlag is a complex package and requires a flexible configuration
system. The following are the requirements of the configuration
system:
1) Configuration must be available from a number of sources:
- Autoconf must be able to set things like the python path (in case
pyflag is installed to a different prefix)
- Users must be able to configure the installed system for their
specific requirements.
- Unconfigured parameters must be resolved at run time through the
GUI and saved.
2) Configuration must be able to apply to cases specifically.
3) Because pyflag is modular, configuration variables might be required
for each module. This means that definitions and declarations of
configuration variables must be distributed in each plugin.
These goals are achieved by the use of multiple sources of
configuration information:
- The system wide configuration file is this file: conf.py. It is
generated by the build system from conf.py.in by substituting
autoconfigured variables into it. It contains the most basic
settings related to the installation, e.g. which python interpreter
is used, where the python modules are installed etc. In particular
it refers to the location of the system configuration file (usually
found in /usr/local/etc/pyflagrc, or in /etc/pyflagrc).
- The sysconfig file contains things like where the upload
directory is, where to store temporary files etc. These are mainly
installation wide settings which are expected to be modified by the
administrator. Note that if you want the GUI to manipulate this
file it needs to be writable by the user running the GUI.
- Finally a conf table in each case is used to provide a per case
configuration
"""
import ConfigParser
import optparse
import os
import sys
default_config = "/etc/volatilityrc"
class PyFlagOptionParser(optparse.OptionParser):
final = False
help_hooks = []
def _process_args(self, largs, rargs, values):
try:
return optparse.OptionParser._process_args(self, largs, rargs, values)
except (optparse.BadOptionError, optparse.OptionValueError), err:
if self.final:
raise err
def error(self, msg):
## We cant emit errors about missing parameters until we are
## sure that all modules have registered all their parameters
if self.final:
return optparse.OptionParser.error(self, msg)
else:
raise RuntimeError(msg)
def print_help(self, file = sys.stdout):
optparse.OptionParser.print_help(self, file)
for cb in self.help_hooks:
file.write(cb())
class ConfObject(object):
""" This is a singleton class to manage the configuration.
This means it can be instantiated many times, but each instance
refers to the global configuration (which is set in class
variables).
NOTE: The class attributes have static dicts assigned to
facilitate singleton behaviour. This means all future instances
will have the same dicts.
"""
optparser = PyFlagOptionParser(add_help_option = False,
version = False,
)
initialised = False
## This is the globals dictionary which will be used for
## evaluating the configuration directives.
g_dict = dict(__builtins__ = None)
## These are the options derived by reading any config files
cnf_opts = {}
## Command line opts
opts = {}
args = None
default_opts = {}
docstrings = {}
## These are the actual options returned by the optparser:
optparse_opts = None
## Filename where the configuration file is:
_filename = None
_filenames = []
## These parameters can not be updated by the GUI (but will be
## propagated into new configuration files)
readonly = {}
## Absolute parameters can only be set by the code or command
    ## lines; they can not be overridden in the configuration
    ## file. This ensures that configuration files don't mask new
## options (e.g. schema version)
_absolute = {}
## A list of option names:
options = []
## Cache variants: There are configuration options which
## encapsulate the state of the running program. If any of these
## change all caches will be invalidated.
cache_invalidators = {}
def __init__(self):
""" This is a singleton object kept in the class """
if not ConfObject.initialised:
self.optparser.add_option("-h", "--help", action = "store_true", default = False,
help = "list all available options and their default values. Default values may be set in the configuration file (" + default_config + ")")
ConfObject.initialised = True
def set_usage(self, usage = None, version = None):
if usage:
self.optparser.set_usage(usage)
if version:
self.optparser.version = version
def add_file(self, filename, _type = 'init'):
""" Adds a new file to parse """
self._filenames.append(filename)
self.cnf_opts.clear()
for f in self._filenames:
try:
conf_parser = ConfigParser.ConfigParser()
conf_parser.read(f)
for k, v in conf_parser.items('DEFAULT'):
## Absolute parameters are protected from
## configuration files:
if k in self._absolute.keys():
continue
try:
v = eval(v, self.g_dict)
except Exception, _e:
pass
## update the configured options
self.cnf_opts[k] = v
except IOError:
print "Unable to open {0}".format(f)
ConfObject._filename = filename
def print_help(self):
return self.optparser.print_help()
def add_help_hook(self, cb):
""" Adds an epilog to the help message """
self.optparser.help_hooks.append(cb)
def set_help_hook(self, cb):
self.optparser.help_hooks = [cb]
def parse_options(self, final = True):
""" Parses the options from command line and any conf files
currently added.
The final parameter should be only called from main programs
at the point where they are prepared for us to call exit if
required; (For example when we detect the -h parameter).
"""
self.optparser.final = final
## Parse the command line options:
try:
(opts, args) = self.optparser.parse_args()
self.opts.clear()
## Update our cmdline dict:
for k in dir(opts):
v = getattr(opts, k)
                if k in self.options and v is not None:
self.opts[k] = v
except UnboundLocalError:
raise RuntimeError("Unknown option - use -h to see help")
## If error() was called we catch it here
except RuntimeError:
opts = {}
## This gives us as much as was parsed so far
args = self.optparser.largs
self.optparse_opts = opts
self.args = args
if final:
## Reparse the config file again:
self.add_file(self._filename)
try:
## Help can only be set on the command line
if getattr(self.optparse_opts, "help"):
## Populate the metavars with the default values:
for opt in self.optparser.option_list:
try:
opt.metavar = "{0}".format((getattr(self, opt.dest) or
opt.dest.upper()))
except Exception, _e:
pass
self.optparser.print_help()
sys.exit(0)
except AttributeError:
pass
## Set the cache invalidators on the cache now:
import volatility.cache as cache
for k, v in self.cache_invalidators.items():
cache.CACHE.invalidate_on(k, v)
def remove_option(self, option):
""" Removes options both from the config file parser and the
command line parser
This should only by used on options *before* they have been read,
otherwise things could get very confusing.
"""
option = option.lower()
if option in self.cache_invalidators:
del self.cache_invalidators[option]
normalized_option = option.replace("-", "_")
if normalized_option not in self.options:
return
self.options.remove(normalized_option)
if normalized_option in self.readonly:
del self.readonly[normalized_option]
if normalized_option in self.default_opts:
del self.default_opts[normalized_option]
if normalized_option in self._absolute:
del self._absolute[normalized_option]
del self.docstrings[normalized_option]
self.optparser.remove_option("--{0}".format(option))
try:
self.parse_options(False)
except AttributeError:
pass
def add_option(self, option, short_option = None,
cache_invalidator = True,
**args):
""" Adds options both to the config file parser and the
command line parser.
Args:
option: The long option name.
short_option: An optional short option.
cache_invalidator: If set, when this option
changes all caches are invalidated.
"""
option = option.lower()
if cache_invalidator:
self.cache_invalidators[option] = lambda : self.get_value(option)
normalized_option = option.replace("-", "_")
if normalized_option in self.options:
return
self.options.append(normalized_option)
## If this is read only we store it in a special dict
try:
if args['readonly']:
self.readonly[normalized_option] = args['default']
del args['readonly']
except KeyError:
pass
## If there is a default specified, we update our defaults dict:
try:
default = args['default']
try:
default = eval(default, self.g_dict)
except:
pass
self.default_opts[normalized_option] = default
del args['default']
except KeyError:
pass
try:
self._absolute[normalized_option] = args['absolute']
del args['absolute']
except KeyError:
pass
self.docstrings[normalized_option] = args.get('help', None)
if short_option:
self.optparser.add_option("-{0}".format(short_option), "--{0}".format(option), **args)
else:
self.optparser.add_option("--{0}".format(option), **args)
## update the command line parser
## We have to do the try-catch for python 2.4 support of short
## arguments. It can be removed when python 2.5 is a requirement
try:
self.parse_options(False)
except AttributeError:
pass
def update(self, key, value):
""" This can be used by scripts to force a value of an option """
self.readonly[key.lower()] = value
def get_value(self, key):
return getattr(self, key.replace("-", "_"))
def __getattr__(self, attr):
## If someone is looking for a configuration parameter but
## we have not parsed anything yet - do so now.
        if self.opts is None:
self.parse_options(False)
        ## Maybe it's a class method?
try:
return super(ConfObject, self).__getattribute__(attr)
except AttributeError:
pass
## Is it a ready only parameter (i.e. can not be overridden by
## the config file)
try:
return self.readonly[attr.lower()]
except KeyError:
pass
## Try to find the attribute in the command line options:
try:
return self.opts[attr.lower()]
except KeyError:
pass
## Has it already been parsed?
try:
tmp = getattr(self.optparser.values, attr.lower())
if tmp:
return tmp
except AttributeError:
pass
## Was it given in the environment?
try:
return os.environ["VOLATILITY_" + attr.upper()]
except KeyError:
pass
## No - try the configuration file:
try:
return self.cnf_opts[attr.lower()]
except KeyError:
pass
## No - is there a default for it?
try:
return self.default_opts[attr.lower()]
except KeyError:
pass
        ## Maybe it's just a command line option:
try:
if not attr.startswith("_") and self.optparse_opts:
return getattr(self.optparse_opts, attr.lower())
except AttributeError:
pass
raise AttributeError("Parameter {0} is not configured - try setting it on the command line (-h for help)".format(attr))
class DummyConfig(ConfObject):
pass
config = ConfObject()
if os.access(default_config, os.R_OK):
config.add_file(default_config)
else:
config.add_file("volatilityrc")
default_conf_path = ".volatilityrc"
try:
default_conf_path = os.environ['HOME'] + '/.volatilityrc'
except KeyError:
pass
config.add_option("CONF-FILE", default = default_conf_path,
cache_invalidator = False,
help = "User based configuration file")
config.add_file(config.CONF_FILE)
| gpl-2.0 |
zbqf109/goodo | openerp/addons/l10n_cr/__openerp__.py | 4 | 2892 | # -*- encoding: utf-8 -*-
##############################################################################
#
# __openerp__.py
# l10n_cr_account
# First author: Carlos Vásquez <[email protected]> (ClearCorp S.A.)
# Copyright (c) 2010-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of ClearCorp S.A..
#
##############################################################################
{
'name': 'Costa Rica - Accounting',
'version': '1.0',
'url': 'http://launchpad.net/openerp-costa-rica',
'author': 'ClearCorp S.A.',
'website': 'http://clearcorp.co.cr',
'category': 'Localization/Account Charts',
'description': """
Chart of accounts for Costa Rica.
=================================
Includes:
---------
* account.account.template
* account.tax.template
* account.chart.template
Everything is in English with Spanish translation. Further translations are welcome,
please go to http://translations.launchpad.net/openerp-costa-rica.
""",
'depends': ['account'],
'data': [
'data/l10n_cr_base_data.xml',
'data/account_chart_template.xml',
'data/account_account_template.xml',
'data/account_chart_template_after.xml',
'data/account_tax_template.xml',
'data/account.chart.template.yml',
],
'installable': True,
}
| gpl-3.0 |
dsavoiu/kafe2 | kafe2/core/constraint.py | 1 | 12225 | import abc
import numpy as np
import six
from ..fit.io.file import FileIOMixin
__all__ = ['ParameterConstraintException', 'ParameterConstraint', 'GaussianSimpleParameterConstraint',
'GaussianMatrixParameterConstraint']
class ParameterConstraintException(Exception):
pass
@six.add_metaclass(abc.ABCMeta)
class ParameterConstraint(FileIOMixin, object):
"""
Abstract base class for parameter constraints.
Subclasses must implement the ``cost`` method.
"""
def __init__(self):
pass
@property
@abc.abstractmethod
def extra_ndf(self):
"""
:return: the additional number of degrees of freedom introduced by this constraint.
"""
def _get_base_class(self):
return ParameterConstraint
def _get_object_type_name(self):
return 'parameter_constraint'
def cost(self, parameter_values):
"""
Calculates additional cost depending on the fit parameter values.
:param parameter_values: The current parameter values of the fit
:type parameter_values: iterable of float
:return: The additional cost imposed by the given parameter values
:rtype: float
"""
pass
class GaussianSimpleParameterConstraint(ParameterConstraint):
def __init__(self, index, value, uncertainty, relative=False):
"""
Simple class for applying a gaussian constraint to a single parameter of a fit.
:param index: The index of the parameter to be constrained
:type index: int
:param value: The value to which the parameter should be constrained
:type value: float
:param uncertainty: The uncertainty with which the parameter should be constrained to the given value
:type uncertainty: float
:param relative: Whether the given uncertainty is relative to the given value
:type relative: bool
"""
self._index = index
self._value = value
if relative:
self._uncertainty_abs = None
self._uncertainty_rel = uncertainty
else:
self._uncertainty_abs = uncertainty
self._uncertainty_rel = None
self._relative = relative
super(GaussianSimpleParameterConstraint, self).__init__()
@property
def index(self):
"""the index of the constrained parameter"""
return self._index
@property
def value(self):
"""the value to which the parameter is being constrained"""
return self._value
@property
def uncertainty(self):
"""the absolute uncertainty with which the parameter is being constrained"""
if self._uncertainty_abs is None:
self._uncertainty_abs = self._uncertainty_rel * self.value
return self._uncertainty_abs
@property
def uncertainty_rel(self):
"""the uncertainty relative to ``value`` with which the parameter is being constrained"""
if self._uncertainty_rel is None:
self._uncertainty_rel = self._uncertainty_abs / self.value
return self._uncertainty_rel
@property
def relative(self):
"""whether the constraint was initialized with a relative uncertainty"""
return self._relative
@property
def extra_ndf(self):
return 1
def cost(self, parameter_values):
"""
Calculates additional cost depending on the fit parameter values.
More specifically, the constraint first picks the value from ``parameter_values`` at ``self.index``.
The constraint then calculates the residual by subtracting ``self.value``.
The final cost is calculated by dividing the residual by ``self.uncertainty`` and squaring the result.
:param parameter_values: The current parameter values of the fit
:type parameter_values: iterable of float
:return: The additional cost imposed by the given parameter values
:rtype: float
"""
return ((parameter_values[self.index] - self.value) / self.uncertainty) ** 2
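# Minimal numeric sketch (assumed values, not part of the original module):
# constraining parameter 1 to 5.0 +/- 0.5 adds ((4.0 - 5.0) / 0.5) ** 2 == 4.0
# to the cost when the current parameter values are [1.0, 4.0]:
#
#   _constraint = GaussianSimpleParameterConstraint(index=1, value=5.0, uncertainty=0.5)
#   _constraint.cost([1.0, 4.0]) # -> 4.0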
class GaussianMatrixParameterConstraint(ParameterConstraint):
def __init__(self, indices, values, matrix, matrix_type='cov', uncertainties=None, relative=False):
"""
Advanced class for applying correlated constraints to several parameters of a fit.
The order of ``indices``, ``values``, ``matrix``, and ``uncertainties`` must be aligned.
In other words the first index must belong to the first value, the first row/column in the matrix, etc.
Let N be the number of parameters to be constrained.
:param indices: The indices of the parameters to be constrained
:type indices: iterable of int, shape (N,)
:param values: The values to which the parameters should be constrained
:type values: iterable of float, shape (N,)
:param matrix: The matrix that defines the correlation between the parameters. By default interpreted as a
covariance matrix. Can also be interpreted as a correlation matrix by setting ``matrix_type``
:type matrix: iterable of float, shape (N, N)
:param matrix_type: Whether the matrix should be interpreted as a covariance matrix or as a correlation matrix
:type matrix_type: str, either 'cov' or 'cor'
:param uncertainties: The uncertainties to be used in conjunction with a correlation matrix
:type uncertainties: ``None`` or iterable of float, shape (N,)
:param relative: Whether the covariance matrix/the uncertainties should be interpreted as relative to ``values``
:type relative: bool
"""
self._indices = np.array(indices)
self._values = np.array(values)
_matrix_array = np.array(matrix)
if not np.array_equal(_matrix_array, _matrix_array.T):
raise ValueError('The matrix for parameter constraints must be symmetric!')
if len(self._values.shape) != 1 or self._values.shape * 2 != _matrix_array.shape:
raise ValueError(
'Expected values and cov_mat to be of shapes (N, ), (N, N) but received shapes %s, %s instead!'
% (self._values.shape, _matrix_array.shape))
if matrix_type == 'cov':
pass
elif matrix_type == 'cor':
if np.any(np.diag(_matrix_array) != 1.0):
raise ValueError('The correlation matrix has diagonal elements that aren\'t equal to 1!')
            if np.any(_matrix_array > 1.0):
raise ValueError('The correlation matrix has elements greater than 1!')
if np.any(_matrix_array < -1.0):
raise ValueError('The correlation matrix has elements smaller than -1!')
else:
raise ValueError('Unknown matrix_type: %s, must be either cov or cor!' % matrix_type)
if matrix_type == 'cov':
if relative:
self._cov_mat_abs = None
self._cov_mat_rel = _matrix_array
self._cor_mat = None
else:
self._cov_mat_abs = _matrix_array
self._cov_mat_rel = None
self._cor_mat = None
if uncertainties is not None:
raise ValueError('Uncertainties can only be specified if matrix_type is cov!')
self._uncertainties_abs = None
self._uncertainties_rel = None
else:
self._cov_mat_abs = None
self._cov_mat_rel = None
self._cor_mat = _matrix_array
if uncertainties is None:
raise ValueError('If matrix_type is cor uncertainties must be specified!')
if relative:
self._uncertainties_abs = None
self._uncertainties_rel = uncertainties
else:
self._uncertainties_abs = uncertainties
self._uncertainties_rel = None
self._matrix_type = matrix_type
self._relative = relative
self._cov_mat_inverse = None
super(GaussianMatrixParameterConstraint, self).__init__()
@property
def indices(self):
"""the indices of the parameters to be constrained"""
return self._indices
@property
def values(self):
"""the values to which the parameters are being constrained"""
return self._values
@property
def cov_mat(self):
"""the absolute covariance matrix between the parameter uncertainties"""
if self._cov_mat_abs is None:
if self.matrix_type == 'cov':
self._cov_mat_abs = self._cov_mat_rel * np.outer(self.values, self.values)
else:
self._cov_mat_abs = self._cor_mat * np.outer(self.uncertainties, self.uncertainties)
return self._cov_mat_abs
@property
def cov_mat_rel(self):
"""the covariance matrix between the parameter uncertainties relative to ``self.values``"""
if self._cov_mat_rel is None:
if self.matrix_type == 'cov':
self._cov_mat_rel = self._cov_mat_abs / np.outer(self.values, self.values)
else:
self._cov_mat_rel = self._cor_mat * np.outer(self.uncertainties_rel, self.uncertainties_rel)
return self._cov_mat_rel
@property
def cor_mat(self):
"""the correlation matrix between the parameter uncertainties"""
if self._cor_mat is None:
# if the originally specified cov mat was relative, calculate the cor mat based on that
if self._relative:
self._cor_mat = self.cov_mat_rel / np.outer(self.uncertainties_rel, self.uncertainties_rel)
else:
self._cor_mat = self.cov_mat / np.outer(self.uncertainties, self.uncertainties)
return self._cor_mat
@property
def uncertainties(self):
"""the uncorrelated, absolute uncertainties for the parameters to be constrained to"""
if self._uncertainties_abs is None:
if self.matrix_type == 'cov':
self._uncertainties_abs = np.sqrt(np.diag(self.cov_mat))
else:
self._uncertainties_abs = self.uncertainties_rel * self.values
return self._uncertainties_abs
@property
def uncertainties_rel(self):
"""the uncorrelated uncertainties for the parameters to be constrained to relative to ``self.values``"""
if self._uncertainties_rel is None:
if self.matrix_type == 'cov':
self._uncertainties_rel = np.sqrt(np.diag(self.cov_mat_rel))
else:
self._uncertainties_rel = self.uncertainties / self.values
return self._uncertainties_rel
@property
def matrix_type(self):
"""the type of matrix with which the constraint was initialized"""
return self._matrix_type
@property
def relative(self):
"""whether the constraint was initialized with a relative covariance matrix/with relative uncertainties"""
return self._relative
@property
def cov_mat_inverse(self):
"""the inverse of the covariance matrix between the parameter uncertainties"""
if self._cov_mat_inverse is None:
self._cov_mat_inverse = np.linalg.inv(self.cov_mat)
return self._cov_mat_inverse
@property
def extra_ndf(self):
return len(self.indices)
def cost(self, parameter_values):
"""
Calculates additional cost depending on the fit parameter values.
More specifically, the constraint first picks values from ``parameter_values`` according to ``self.indices``.
The constraint then calculates the residuals by subtracting ``self.values``.
The final cost is calculated by applying the residuals to both sides of ``self.cov_mat_inverse``
via dot product.
:param parameter_values: The current parameter values of the fit
:type parameter_values: iterable of float
:return: The additional cost imposed by the given parameter values
:rtype: float
"""
_selected_par_values = np.asarray(parameter_values)[self.indices]
_res = _selected_par_values - self.values
return _res.dot(self.cov_mat_inverse).dot(_res)
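# Minimal numeric sketch (assumed values, not part of the original module): with
# a diagonal (uncorrelated) covariance matrix the matrix constraint reduces to a
# sum of simple gaussian penalties:
#
#   _constraint = GaussianMatrixParameterConstraint(
#       indices=[0, 1], values=[1.0, 2.0], matrix=[[0.25, 0.0], [0.0, 1.0]])
#   _constraint.cost([1.5, 3.0]) # -> (0.5 / 0.5) ** 2 + (1.0 / 1.0) ** 2 == 2.0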
| gpl-3.0 |
akubera/AliMaster | alimaster/gui/mainwindow.py | 1 | 4120 | #
# alimaster/gui/mainwindow.py
#
from tkinter import * # noqa
from tkinter.ttk import * # noqa
from alimaster import __version__
from .filebrowser import FileBrowserWindow
from .help_window import HelpWindow
from .settings_window import SettingsWindow
import alimaster
class MainWindow():
"""
The main window which holds some status about the ALIEN connection and
provides buttons which load all the other alimaster windows.
If there are other child windows open, the MainWindow will not close, but
minimize to taskbar.
"""
def __init__(self, app):
"""
Create the main 'control' window of the AliMaster program
"""
self.app = app
self.root = app.root
self.window = app.get_new_window("Alimaster Control", (220, 500))
self.window.protocol("WM_DELETE_WINDOW", self.close)
# self.window = Toplevel(master)
# self.window.minsize(220,500)
# self.window.title("Alimaster Control")
# self.window.protocol("WM_DELETE_WINDOW", self.hide)
# self.style = Style()
# GenerateStyle(self.style)
# status_style.configure("StatusGood.TLabel", foreground="green")
# status_style.configure("StatusBad.TLabel", foreground="red")
self.frame = Frame(self.window)
self.status_bar = Frame(self.frame)
self.status_bar.label = Label(self.status_bar,
text="Status"
)
self.status_bar.label.pack(side=LEFT)
self.status_bar.status = Label(self.status_bar,
text="●"
)
self.status_bar.status.pack()
self.label = Label(self.frame,
text="AliMaster v%s" % (__version__),
font=('DejaVu Mono', 16)
)
self.label.pack(pady=9, padx=4)
self.status_bar.pack(fill=X, pady=(9, 3), padx=4)
self.add_button('File Browser',
self.create_filebrowser,
fill=X,
pady=(9, 3),
padx=4)
self.add_button('Settings',
self.create_settings_window,
fill=X,
pady=(9, 3),
padx=4)
self.add_button('Help',
self.create_helpwindow,
fill=X,
pady=(9, 3),
padx=4)
self.add_button("Load ROOT",
self.load_root,
fill=X,
pady=(3, 3),
padx=4)
self.add_button("Quit",
self.app.quit,
fill=X,
pady=(3, 9),
padx=4)
self.set_status_good()
self.frame.pack(fill=BOTH, expand=1)
def add_button(self, text, command, **pack_args):
Button(self.frame,
text=text,
command=command
).pack(**pack_args)
def quit(self):
print("[MainWindow::quit]")
self.root.after(0, self.root.quit)
def hide(self):
self.window.withdraw()
def show(self):
self.window.update()
def close(self):
print('window_count', self.app._win_count)
if self.app._win_count < 2:
self.app.quit()
else:
self.window.iconify()
def create_filebrowser(self):
FileBrowserWindow(self.app)
def create_helpwindow(self):
HelpWindow(self.app)
def create_settings_window(self):
SettingsWindow(self.app)
def set_status_bad(self):
self.status_bar.status.configure(style='StatusBad.TLabel')
def set_status_good(self):
from .style import style
self.status_bar.status.configure(style='StatusGood.TLabel')
def run_in_thread():
pass
def load_root(self):
alimaster.import_root_module()
| lgpl-3.0 |
omarkhan/opencraft | instance/serializers/logentry.py | 1 | 1352 | # -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
LogEntry serializers (API representation)
"""
# Imports #####################################################################
from rest_framework import serializers
# Serializers #################################################################
class LogEntrySerializer(serializers.Serializer): #pylint: disable=abstract-method
"""
Log entries API serializer
"""
level = serializers.CharField(read_only=True)
text = serializers.CharField(read_only=True)
created = serializers.DateTimeField(read_only=True)
| agpl-3.0 |
2015fallproject/2015fallcase2 | static/Brython3.2.0-20150701-214155/Lib/test/unittests/test_memoryio.py | 25 | 26610 | """Unit tests for memory-based file-like objects.
StringIO -- for unicode strings
BytesIO -- for bytes
"""
import unittest
from test import support
import io
import _pyio as pyio
import pickle
class MemorySeekTestMixin:
def testInit(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
def testRead(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEqual(buf[:1], bytesIo.read(1))
self.assertEqual(buf[1:5], bytesIo.read(4))
self.assertEqual(buf[5:], bytesIo.read(900))
self.assertEqual(self.EOF, bytesIo.read())
def testReadNoArgs(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEqual(buf, bytesIo.read())
self.assertEqual(self.EOF, bytesIo.read())
def testSeek(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
bytesIo.read(5)
bytesIo.seek(0)
self.assertEqual(buf, bytesIo.read())
bytesIo.seek(3)
self.assertEqual(buf[3:], bytesIo.read())
self.assertRaises(TypeError, bytesIo.seek, 0.0)
def testTell(self):
buf = self.buftype("1234567890")
bytesIo = self.ioclass(buf)
self.assertEqual(0, bytesIo.tell())
bytesIo.seek(5)
self.assertEqual(5, bytesIo.tell())
bytesIo.seek(10000)
self.assertEqual(10000, bytesIo.tell())
class MemoryTestMixin:
def test_detach(self):
buf = self.ioclass()
self.assertRaises(self.UnsupportedOperation, buf.detach)
def write_ops(self, f, t):
self.assertEqual(f.write(t("blah.")), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(t("Hello.")), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(5), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(t(" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(t("h")), 1)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 1)
def test_write(self):
buf = self.buftype("hello world\n")
memio = self.ioclass(buf)
self.write_ops(memio, self.buftype)
self.assertEqual(memio.getvalue(), buf)
memio = self.ioclass()
self.write_ops(memio, self.buftype)
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.write, None)
memio.close()
self.assertRaises(ValueError, memio.write, self.buftype(""))
def test_writelines(self):
buf = self.buftype("1234567890")
memio = self.ioclass()
self.assertEqual(memio.writelines([buf] * 100), None)
self.assertEqual(memio.getvalue(), buf * 100)
memio.writelines([])
self.assertEqual(memio.getvalue(), buf * 100)
memio = self.ioclass()
self.assertRaises(TypeError, memio.writelines, [buf] + [1])
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.writelines, None)
memio.close()
self.assertRaises(ValueError, memio.writelines, [])
def test_writelines_error(self):
memio = self.ioclass()
def error_gen():
yield self.buftype('spam')
raise KeyboardInterrupt
self.assertRaises(KeyboardInterrupt, memio.writelines, error_gen())
def test_truncate(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertRaises(ValueError, memio.truncate, -1)
memio.seek(6)
self.assertEqual(memio.truncate(), 6)
self.assertEqual(memio.getvalue(), buf[:6])
self.assertEqual(memio.truncate(4), 4)
self.assertEqual(memio.getvalue(), buf[:4])
self.assertEqual(memio.tell(), 6)
memio.seek(0, 2)
memio.write(buf)
self.assertEqual(memio.getvalue(), buf[:4] + buf)
pos = memio.tell()
self.assertEqual(memio.truncate(None), pos)
self.assertEqual(memio.tell(), pos)
self.assertRaises(TypeError, memio.truncate, '0')
memio.close()
self.assertRaises(ValueError, memio.truncate, 0)
def test_init(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
memio = self.ioclass(None)
self.assertEqual(memio.getvalue(), self.EOF)
memio.__init__(buf * 2)
self.assertEqual(memio.getvalue(), buf * 2)
memio.__init__(buf)
self.assertEqual(memio.getvalue(), buf)
self.assertRaises(TypeError, memio.__init__, [])
def test_read(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.read(0), self.EOF)
self.assertEqual(memio.read(1), buf[:1])
self.assertEqual(memio.read(4), buf[1:5])
self.assertEqual(memio.read(900), buf[5:])
self.assertEqual(memio.read(), self.EOF)
memio.seek(0)
self.assertEqual(memio.read(), buf)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.tell(), 10)
memio.seek(0)
self.assertEqual(memio.read(-1), buf)
memio.seek(0)
self.assertEqual(type(memio.read()), type(buf))
memio.seek(100)
self.assertEqual(type(memio.read()), type(buf))
memio.seek(0)
self.assertEqual(memio.read(None), buf)
self.assertRaises(TypeError, memio.read, '')
memio.close()
self.assertRaises(ValueError, memio.read)
def test_readline(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 2)
self.assertEqual(memio.readline(0), self.EOF)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), self.EOF)
memio.seek(0)
self.assertEqual(memio.readline(5), buf[:5])
self.assertEqual(memio.readline(5), buf[5:10])
self.assertEqual(memio.readline(5), buf[10:15])
memio.seek(0)
self.assertEqual(memio.readline(-1), buf)
memio.seek(0)
self.assertEqual(memio.readline(0), self.EOF)
buf = self.buftype("1234567890\n")
memio = self.ioclass((buf * 3)[:-1])
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf)
self.assertEqual(memio.readline(), buf[:-1])
self.assertEqual(memio.readline(), self.EOF)
memio.seek(0)
self.assertEqual(type(memio.readline()), type(buf))
self.assertEqual(memio.readline(), buf)
self.assertRaises(TypeError, memio.readline, '')
memio.close()
self.assertRaises(ValueError, memio.readline)
def test_readlines(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 10)
self.assertEqual(memio.readlines(), [buf] * 10)
memio.seek(5)
self.assertEqual(memio.readlines(), [buf[5:]] + [buf] * 9)
memio.seek(0)
self.assertEqual(memio.readlines(15), [buf] * 2)
memio.seek(0)
self.assertEqual(memio.readlines(-1), [buf] * 10)
memio.seek(0)
self.assertEqual(memio.readlines(0), [buf] * 10)
memio.seek(0)
self.assertEqual(type(memio.readlines()[0]), type(buf))
memio.seek(0)
self.assertEqual(memio.readlines(None), [buf] * 10)
self.assertRaises(TypeError, memio.readlines, '')
memio.close()
self.assertRaises(ValueError, memio.readlines)
def test_iterator(self):
buf = self.buftype("1234567890\n")
memio = self.ioclass(buf * 10)
self.assertEqual(iter(memio), memio)
self.assertTrue(hasattr(memio, '__iter__'))
self.assertTrue(hasattr(memio, '__next__'))
i = 0
for line in memio:
self.assertEqual(line, buf)
i += 1
self.assertEqual(i, 10)
memio.seek(0)
i = 0
for line in memio:
self.assertEqual(line, buf)
i += 1
self.assertEqual(i, 10)
memio = self.ioclass(buf * 2)
memio.close()
self.assertRaises(ValueError, memio.__next__)
def test_getvalue(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
memio.read()
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(type(memio.getvalue()), type(buf))
memio = self.ioclass(buf * 1000)
self.assertEqual(memio.getvalue()[-3:], self.buftype("890"))
memio = self.ioclass(buf)
memio.close()
self.assertRaises(ValueError, memio.getvalue)
def test_seek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
memio.read(5)
self.assertRaises(ValueError, memio.seek, -1)
self.assertRaises(ValueError, memio.seek, 1, -1)
self.assertRaises(ValueError, memio.seek, 1, 3)
self.assertEqual(memio.seek(0), 0)
self.assertEqual(memio.seek(0, 0), 0)
self.assertEqual(memio.read(), buf)
self.assertEqual(memio.seek(3), 3)
self.assertEqual(memio.seek(0, 1), 3)
self.assertEqual(memio.read(), buf[3:])
self.assertEqual(memio.seek(len(buf)), len(buf))
self.assertEqual(memio.read(), self.EOF)
memio.seek(len(buf) + 1)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.seek(0, 2), len(buf))
self.assertEqual(memio.read(), self.EOF)
memio.close()
self.assertRaises(ValueError, memio.seek, 0)
def test_overseek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.seek(len(buf) + 1), 11)
self.assertEqual(memio.read(), self.EOF)
self.assertEqual(memio.tell(), 11)
self.assertEqual(memio.getvalue(), buf)
memio.write(self.EOF)
self.assertEqual(memio.getvalue(), buf)
memio.write(buf)
self.assertEqual(memio.getvalue(), buf + self.buftype('\0') + buf)
def test_tell(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.tell(), 0)
memio.seek(5)
self.assertEqual(memio.tell(), 5)
memio.seek(10000)
self.assertEqual(memio.tell(), 10000)
memio.close()
self.assertRaises(ValueError, memio.tell)
def test_flush(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.flush(), None)
def test_flags(self):
memio = self.ioclass()
self.assertEqual(memio.writable(), True)
self.assertEqual(memio.readable(), True)
self.assertEqual(memio.seekable(), True)
self.assertEqual(memio.isatty(), False)
self.assertEqual(memio.closed, False)
memio.close()
self.assertRaises(ValueError, memio.writable)
self.assertRaises(ValueError, memio.readable)
self.assertRaises(ValueError, memio.seekable)
self.assertRaises(ValueError, memio.isatty)
self.assertEqual(memio.closed, True)
def test_subclassing(self):
buf = self.buftype("1234567890")
def test1():
class MemIO(self.ioclass):
pass
m = MemIO(buf)
return m.getvalue()
def test2():
class MemIO(self.ioclass):
def __init__(me, a, b):
self.ioclass.__init__(me, a)
m = MemIO(buf, None)
return m.getvalue()
self.assertEqual(test1(), buf)
self.assertEqual(test2(), buf)
def test_instance_dict_leak(self):
# Test case for issue #6242.
        # This will be caught by regrtest.py -R if this leaks.
for _ in range(100):
memio = self.ioclass()
memio.foo = 1
def test_pickling(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
memio.foo = 42
memio.seek(2)
class PickleTestMemIO(self.ioclass):
def __init__(me, initvalue, foo):
self.ioclass.__init__(me, initvalue)
me.foo = foo
# __getnewargs__ is undefined on purpose. This checks that PEP 307
# is used to provide pickling support.
# Pickle expects the class to be on the module level. Here we use a
# little hack to allow the PickleTestMemIO class to derive from
        # self.ioclass without having to define all combinations explicitly on
# the module-level.
import __main__
PickleTestMemIO.__module__ = '__main__'
__main__.PickleTestMemIO = PickleTestMemIO
submemio = PickleTestMemIO(buf, 80)
submemio.seek(2)
# We only support pickle protocol 2 and onward since we use extended
# __reduce__ API of PEP 307 to provide pickling support.
for proto in range(2, pickle.HIGHEST_PROTOCOL):
for obj in (memio, submemio):
obj2 = pickle.loads(pickle.dumps(obj, protocol=proto))
self.assertEqual(obj.getvalue(), obj2.getvalue())
self.assertEqual(obj.__class__, obj2.__class__)
self.assertEqual(obj.foo, obj2.foo)
self.assertEqual(obj.tell(), obj2.tell())
obj2.close()
self.assertRaises(ValueError, pickle.dumps, obj2, proto)
del __main__.PickleTestMemIO
class BytesIOMixin:
def test_getbuffer(self):
memio = self.ioclass(b"1234567890")
buf = memio.getbuffer()
self.assertEqual(bytes(buf), b"1234567890")
memio.seek(5)
buf = memio.getbuffer()
self.assertEqual(bytes(buf), b"1234567890")
# Trying to change the size of the BytesIO while a buffer is exported
# raises a BufferError.
self.assertRaises(BufferError, memio.write, b'x' * 100)
self.assertRaises(BufferError, memio.truncate)
# Mutating the buffer updates the BytesIO
buf[3:6] = b"abc"
self.assertEqual(bytes(buf), b"123abc7890")
self.assertEqual(memio.getvalue(), b"123abc7890")
# After the buffer gets released, we can resize the BytesIO again
del buf
support.gc_collect()
memio.truncate()
class PyBytesIOTest(MemoryTestMixin, MemorySeekTestMixin,
BytesIOMixin, unittest.TestCase):
UnsupportedOperation = pyio.UnsupportedOperation
@staticmethod
def buftype(s):
return s.encode("ascii")
ioclass = pyio.BytesIO
EOF = b""
def test_read1(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertRaises(TypeError, memio.read1)
self.assertEqual(memio.read(), buf)
def test_readinto(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
b = bytearray(b"hello")
self.assertEqual(memio.readinto(b), 5)
self.assertEqual(b, b"12345")
self.assertEqual(memio.readinto(b), 5)
self.assertEqual(b, b"67890")
self.assertEqual(memio.readinto(b), 0)
self.assertEqual(b, b"67890")
b = bytearray(b"hello world")
memio.seek(0)
self.assertEqual(memio.readinto(b), 10)
self.assertEqual(b, b"1234567890d")
b = bytearray(b"")
memio.seek(0)
self.assertEqual(memio.readinto(b), 0)
self.assertEqual(b, b"")
self.assertRaises(TypeError, memio.readinto, '')
import array
a = array.array('b', b"hello world")
memio = self.ioclass(buf)
memio.readinto(a)
self.assertEqual(a.tobytes(), b"1234567890d")
memio.close()
self.assertRaises(ValueError, memio.readinto, b)
memio = self.ioclass(b"123")
b = bytearray()
memio.seek(42)
memio.readinto(b)
self.assertEqual(b, b"")
def test_relative_seek(self):
buf = self.buftype("1234567890")
memio = self.ioclass(buf)
self.assertEqual(memio.seek(-1, 1), 0)
self.assertEqual(memio.seek(3, 1), 3)
self.assertEqual(memio.seek(-4, 1), 0)
self.assertEqual(memio.seek(-1, 2), 9)
self.assertEqual(memio.seek(1, 1), 10)
self.assertEqual(memio.seek(1, 2), 11)
memio.seek(-3, 2)
self.assertEqual(memio.read(), buf[-3:])
memio.seek(0)
memio.seek(1, 1)
self.assertEqual(memio.read(), buf[1:])
def test_unicode(self):
memio = self.ioclass()
self.assertRaises(TypeError, self.ioclass, "1234567890")
self.assertRaises(TypeError, memio.write, "1234567890")
self.assertRaises(TypeError, memio.writelines, ["1234567890"])
def test_bytes_array(self):
buf = b"1234567890"
import array
a = array.array('b', list(buf))
memio = self.ioclass(a)
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(memio.write(a), 10)
self.assertEqual(memio.getvalue(), buf)
def test_issue5449(self):
buf = self.buftype("1234567890")
self.ioclass(initial_bytes=buf)
self.assertRaises(TypeError, self.ioclass, buf, foo=None)
class TextIOTestMixin:
def test_newlines_property(self):
memio = self.ioclass(newline=None)
# The C StringIO decodes newlines in write() calls, but the Python
        # implementation only does so when reading. This function forces them to
# be decoded for testing.
def force_decode():
memio.seek(0)
memio.read()
self.assertEqual(memio.newlines, None)
memio.write("a\n")
force_decode()
self.assertEqual(memio.newlines, "\n")
memio.write("b\r\n")
force_decode()
self.assertEqual(memio.newlines, ("\n", "\r\n"))
memio.write("c\rd")
force_decode()
self.assertEqual(memio.newlines, ("\r", "\n", "\r\n"))
def test_relative_seek(self):
memio = self.ioclass()
self.assertRaises(IOError, memio.seek, -1, 1)
self.assertRaises(IOError, memio.seek, 3, 1)
self.assertRaises(IOError, memio.seek, -3, 1)
self.assertRaises(IOError, memio.seek, -1, 2)
self.assertRaises(IOError, memio.seek, 1, 1)
self.assertRaises(IOError, memio.seek, 1, 2)
def test_textio_properties(self):
memio = self.ioclass()
# These are just dummy values but we nevertheless check them for fear
# of unexpected breakage.
self.assertIsNone(memio.encoding)
self.assertIsNone(memio.errors)
self.assertFalse(memio.line_buffering)
def test_newline_none(self):
# newline=None
memio = self.ioclass("a\nb\r\nc\rd", newline=None)
self.assertEqual(list(memio), ["a\n", "b\n", "c\n", "d"])
memio.seek(0)
self.assertEqual(memio.read(1), "a")
self.assertEqual(memio.read(2), "\nb")
self.assertEqual(memio.read(2), "\nc")
self.assertEqual(memio.read(1), "\n")
memio = self.ioclass(newline=None)
self.assertEqual(2, memio.write("a\n"))
self.assertEqual(3, memio.write("b\r\n"))
self.assertEqual(3, memio.write("c\rd"))
memio.seek(0)
self.assertEqual(memio.read(), "a\nb\nc\nd")
memio = self.ioclass("a\r\nb", newline=None)
self.assertEqual(memio.read(3), "a\nb")
def test_newline_empty(self):
# newline=""
memio = self.ioclass("a\nb\r\nc\rd", newline="")
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])
memio.seek(0)
self.assertEqual(memio.read(4), "a\nb\r")
self.assertEqual(memio.read(2), "\nc")
self.assertEqual(memio.read(1), "\r")
memio = self.ioclass(newline="")
self.assertEqual(2, memio.write("a\n"))
self.assertEqual(2, memio.write("b\r"))
self.assertEqual(2, memio.write("\nc"))
self.assertEqual(2, memio.write("\rd"))
memio.seek(0)
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])
def test_newline_lf(self):
# newline="\n"
memio = self.ioclass("a\nb\r\nc\rd")
self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])
def test_newline_cr(self):
# newline="\r"
memio = self.ioclass("a\nb\r\nc\rd", newline="\r")
self.assertEqual(memio.read(), "a\rb\r\rc\rd")
memio.seek(0)
self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"])
def test_newline_crlf(self):
# newline="\r\n"
memio = self.ioclass("a\nb\r\nc\rd", newline="\r\n")
self.assertEqual(memio.read(), "a\r\nb\r\r\nc\rd")
memio.seek(0)
self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"])
def test_issue5265(self):
# StringIO can duplicate newlines in universal newlines mode
memio = self.ioclass("a\r\nb\r\n", newline=None)
self.assertEqual(memio.read(5), "a\nb\n")
def test_newline_argument(self):
self.assertRaises(TypeError, self.ioclass, newline=b"\n")
self.assertRaises(ValueError, self.ioclass, newline="error")
# These should not raise an error
for newline in (None, "", "\n", "\r", "\r\n"):
self.ioclass(newline=newline)
class PyStringIOTest(MemoryTestMixin, MemorySeekTestMixin,
TextIOTestMixin, unittest.TestCase):
buftype = str
ioclass = pyio.StringIO
UnsupportedOperation = pyio.UnsupportedOperation
EOF = ""
class PyStringIOPickleTest(TextIOTestMixin, unittest.TestCase):
"""Test if pickle restores properly the internal state of StringIO.
"""
buftype = str
UnsupportedOperation = pyio.UnsupportedOperation
EOF = ""
class ioclass(pyio.StringIO):
def __new__(cls, *args, **kwargs):
return pickle.loads(pickle.dumps(pyio.StringIO(*args, **kwargs)))
def __init__(self, *args, **kwargs):
pass
class CBytesIOTest(PyBytesIOTest):
ioclass = io.BytesIO
UnsupportedOperation = io.UnsupportedOperation
def test_getstate(self):
memio = self.ioclass()
state = memio.__getstate__()
self.assertEqual(len(state), 3)
bytearray(state[0]) # Check if state[0] supports the buffer interface.
self.assertIsInstance(state[1], int)
self.assertTrue(isinstance(state[2], dict) or state[2] is None)
memio.close()
self.assertRaises(ValueError, memio.__getstate__)
def test_setstate(self):
# This checks whether __setstate__ does proper input validation.
memio = self.ioclass()
memio.__setstate__((b"no error", 0, None))
memio.__setstate__((bytearray(b"no error"), 0, None))
memio.__setstate__((b"no error", 0, {'spam': 3}))
self.assertRaises(ValueError, memio.__setstate__, (b"", -1, None))
self.assertRaises(TypeError, memio.__setstate__, ("unicode", 0, None))
self.assertRaises(TypeError, memio.__setstate__, (b"", 0.0, None))
self.assertRaises(TypeError, memio.__setstate__, (b"", 0, 0))
self.assertRaises(TypeError, memio.__setstate__, (b"len-test", 0))
self.assertRaises(TypeError, memio.__setstate__)
self.assertRaises(TypeError, memio.__setstate__, 0)
memio.close()
self.assertRaises(ValueError, memio.__setstate__, (b"closed", 0, None))
check_sizeof = support.check_sizeof
@support.cpython_only
def test_sizeof(self):
basesize = support.calcobjsize('P2nN2Pn')
check = self.check_sizeof
self.assertEqual(object.__sizeof__(io.BytesIO()), basesize)
check(io.BytesIO(), basesize )
check(io.BytesIO(b'a'), basesize + 1 + 1 )
check(io.BytesIO(b'a' * 1000), basesize + 1000 + 1 )
class CStringIOTest(PyStringIOTest):
ioclass = io.StringIO
UnsupportedOperation = io.UnsupportedOperation
# XXX: For the Python version of io.StringIO, this is highly
# dependent on the encoding used for the underlying buffer.
def test_widechar(self):
buf = self.buftype("\U0002030a\U00020347")
memio = self.ioclass(buf)
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(memio.write(buf), len(buf))
self.assertEqual(memio.tell(), len(buf))
self.assertEqual(memio.getvalue(), buf)
self.assertEqual(memio.write(buf), len(buf))
self.assertEqual(memio.tell(), len(buf) * 2)
self.assertEqual(memio.getvalue(), buf + buf)
def test_getstate(self):
memio = self.ioclass()
state = memio.__getstate__()
self.assertEqual(len(state), 4)
self.assertIsInstance(state[0], str)
self.assertIsInstance(state[1], str)
self.assertIsInstance(state[2], int)
self.assertTrue(isinstance(state[3], dict) or state[3] is None)
memio.close()
self.assertRaises(ValueError, memio.__getstate__)
def test_setstate(self):
# This checks whether __setstate__ does proper input validation.
memio = self.ioclass()
memio.__setstate__(("no error", "\n", 0, None))
memio.__setstate__(("no error", "", 0, {'spam': 3}))
self.assertRaises(ValueError, memio.__setstate__, ("", "f", 0, None))
self.assertRaises(ValueError, memio.__setstate__, ("", "", -1, None))
self.assertRaises(TypeError, memio.__setstate__, (b"", "", 0, None))
self.assertRaises(TypeError, memio.__setstate__, ("", b"", 0, None))
self.assertRaises(TypeError, memio.__setstate__, ("", "", 0.0, None))
self.assertRaises(TypeError, memio.__setstate__, ("", "", 0, 0))
self.assertRaises(TypeError, memio.__setstate__, ("len-test", 0))
self.assertRaises(TypeError, memio.__setstate__)
self.assertRaises(TypeError, memio.__setstate__, 0)
memio.close()
self.assertRaises(ValueError, memio.__setstate__, ("closed", "", 0, None))
class CStringIOPickleTest(PyStringIOPickleTest):
UnsupportedOperation = io.UnsupportedOperation
class ioclass(io.StringIO):
def __new__(cls, *args, **kwargs):
return pickle.loads(pickle.dumps(io.StringIO(*args, **kwargs)))
def __init__(self, *args, **kwargs):
pass
def test_main():
tests = [PyBytesIOTest, PyStringIOTest, CBytesIOTest, CStringIOTest,
PyStringIOPickleTest, CStringIOPickleTest]
support.run_unittest(*tests)
if __name__ == '__main__':
test_main()
| agpl-3.0 |
polyaxon/polyaxon-api | polyaxon_lib/libs/decorators.py | 1 | 1178 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import six
from functools import wraps, WRAPPER_ASSIGNMENTS
import tensorflow as tf
class TfTemplate(object):
"""This decorator wraps a method with `tf.make_template`. For example,
Examples:
```python
    >>> @tf_template('scope_name')
    ... def my_method():
    ...     # Creates variables
```
"""
def __init__(self, scope):
self.scope = scope
@staticmethod
def available_attrs(fn):
"""
Return the list of functools-wrappable attributes on a callable.
This is required as a workaround for http://bugs.python.org/issue3445
under Python 2.
"""
if six.PY3:
return WRAPPER_ASSIGNMENTS
else:
return tuple(a for a in WRAPPER_ASSIGNMENTS if hasattr(fn, a))
def __call__(self, func):
this = self
templated_func = tf.make_template(this.scope, func)
@wraps(func, assigned=TfTemplate.available_attrs(func))
def inner(*args, **kwargs):
return templated_func(*args, **kwargs)
return inner
tf_template = TfTemplate
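# Editor's sketch (not part of the original module): a hypothetical usage of
# the decorator above. The function name, shapes and TensorFlow 1.x-style
# tf.get_variable / tf.matmul calls are assumptions for illustration only.
#
#   @tf_template('dense')
#   def dense_layer(x):
#       w = tf.get_variable('w', shape=[int(x.get_shape()[-1]), 10])
#       b = tf.get_variable('b', shape=[10])
#       return tf.matmul(x, w) + b
#
#   # Both calls go through the same tf.make_template, so the second call
#   # reuses the variables 'dense/w' and 'dense/b' created by the first.
#   y1 = dense_layer(inputs_a)
#   y2 = dense_layer(inputs_b)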
| mit |
zaydhach/PyBotWarPro | libs/jython/Lib/distutils/archive_util.py | 7 | 6179 | """distutils.archive_util
Utility functions for creating archive files (tarballs, zip files,
that sort of thing)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import os
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
from distutils.dir_util import mkpath
from distutils import log
def make_tarball (base_name, base_dir, compress="gzip",
verbose=0, dry_run=0):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'. 'compress' must be "gzip" (the default), "compress",
"bzip2", or None. Both "tar" and the compression utility named by
'compress' must be on the default program search path, so this is
probably Unix-specific. The output tar file will be named 'base_dir' +
".tar", possibly plus the appropriate compression extension (".gz",
".bz2" or ".Z"). Return the output filename.
"""
# XXX GNU tar 1.13 has a nifty option to add a prefix directory.
# It's pretty new, though, so we certainly can't require it --
# but it would be nice to take advantage of it to skip the
# "create a tree of hardlinks" step! (Would also be nice to
# detect GNU tar to use its 'z' option and save a step.)
compress_ext = { 'gzip': ".gz",
'bzip2': '.bz2',
'compress': ".Z" }
# flags for compression program, each element of list will be an argument
compress_flags = {'gzip': ["-f9"],
'compress': ["-f"],
'bzip2': ['-f9']}
if compress is not None and compress not in compress_ext.keys():
        raise ValueError, \
              "bad value for 'compress': must be None, 'gzip', 'bzip2', or 'compress'"
archive_name = base_name + ".tar"
mkpath(os.path.dirname(archive_name), dry_run=dry_run)
cmd = ["tar", "-cf", archive_name, base_dir]
spawn(cmd, dry_run=dry_run)
if compress:
spawn([compress] + compress_flags[compress] + [archive_name],
dry_run=dry_run)
return archive_name + compress_ext[compress]
else:
return archive_name
# make_tarball ()
def make_zipfile (base_name, base_dir, verbose=0, dry_run=0):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
try:
import zipfile
except ImportError:
zipfile = None
zip_filename = base_name + ".zip"
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
# If zipfile module is not available, try spawning an external
# 'zip' command.
if zipfile is None:
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
try:
spawn(["zip", zipoptions, zip_filename, base_dir],
dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise DistutilsExecError, \
("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
else:
log.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
def visit (z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
z.write(path, path)
log.info("adding '%s'" % path)
if not dry_run:
z = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
os.path.walk(base_dir, visit, z)
z.close()
return zip_filename
# make_zipfile ()
ARCHIVE_FORMATS = {
'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"),
'tar': (make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (make_zipfile, [],"ZIP file")
}
def check_archive_formats (formats):
for format in formats:
if not ARCHIVE_FORMATS.has_key(format):
return format
else:
return None
def make_archive (base_name, format,
root_dir=None, base_dir=None,
verbose=0, dry_run=0):
"""Create an archive file (eg. zip or tar). 'base_name' is the name
of the file to create, minus any format-specific extension; 'format'
is the archive format: one of "zip", "tar", "ztar", or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
"""
save_cwd = os.getcwd()
if root_dir is not None:
log.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = { 'dry_run': dry_run }
try:
format_info = ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError, "unknown archive format '%s'" % format
func = format_info[0]
for (arg,val) in format_info[1]:
kwargs[arg] = val
filename = apply(func, (base_name, base_dir), kwargs)
if root_dir is not None:
log.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
# make_archive ()
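# Editor's sketch (not part of the original module): a hypothetical call to
# make_archive(); the project and directory names are made up.
#
#   from distutils.archive_util import make_archive
#   # Produces dist/myproject-1.0.tar.gz from the ./myproject tree
#   filename = make_archive('dist/myproject-1.0', 'gztar',
#                           root_dir='.', base_dir='myproject')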
| gpl-2.0 |
UFAL-DSG/cloud-asr | tests/test_batch_recognition.py | 1 | 2520 | import os
import json
import time
import urllib2
import StringIO
import unittest
from jsonschema import validate
class TestBatchRecognition(unittest.TestCase):
def setUp(self):
time.sleep(1)
def test_batch_recognition(self):
response = self.get_response_for_wav()
self.assertResponseHasCorrectSchema(response)
def get_response_for_wav(self):
url = "http://127.0.0.1:8000/recognize?lang=en-towninfo&lm=new_lm"
wav = self.load_wav()
headers = {"Content-Type": "audio/x-wav; rate=16000;"}
request = urllib2.Request(url, wav, headers)
return urllib2.urlopen(request).read()
def load_wav(self):
basedir = os.path.dirname(os.path.realpath(__file__))
return open("%s/../resources/test.wav" % basedir, "rb").read()
def assertResponseHasCorrectSchema(self, response):
schema = {
"type": "object",
"properties": {
"result": {
"type": "array",
"items": {
"type": "object",
"properties": {
"alternative": {
"type": "array",
"items": {
"type": "object",
"properties": {
"confidence": {"type": "number"},
"transcript": {"type": "string"},
},
"required": ["confidence", "transcript"],
"additionalProperties": False,
},
"minItems": 1,
},
"final": {"type": "boolean"},
},
"required": ["alternative", "final"],
"additionalProperties": False,
},
"minItems": 1,
},
"result_index": {"type": "number"},
"chunk_id": {"type": "string"},
"request_id": {"type": "string"},
},
"required": ["result", "result_index", "request_id"],
"additionalProperties": False,
}
validationResult = validate(json.loads(response), schema)
self.assertIsNone(validationResult, msg="Response has invalid schema")
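    # Editor's note (not part of the test): a minimal response that would
    # validate against the schema above; every value is made up.
    #
    #   {
    #     "result": [
    #       {"alternative": [{"confidence": 0.87, "transcript": "hello world"}],
    #        "final": true}
    #     ],
    #     "result_index": 0,
    #     "request_id": "abc123"
    #   }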
| apache-2.0 |
prefetchnta/questlab | bin/x64bin/python/36/Lib/multiprocessing/semaphore_tracker.py | 1 | 5551 | #
# On Unix we run a server process which keeps track of unlinked
# semaphores. The server ignores SIGINT and SIGTERM and reads from a
# pipe. Every other process of the program has a copy of the writable
# end of the pipe, so we get EOF when all other processes have exited.
# Then the server process unlinks any remaining semaphore names.
#
# This is important because the system only supports a limited number
# of named semaphores, and they will not be automatically removed till
# the next reboot. Without this semaphore tracker process, "killall
# python" would probably leave unlinked semaphores.
#
import os
import signal
import sys
import threading
import warnings
import _multiprocessing
from . import spawn
from . import util
__all__ = ['ensure_running', 'register', 'unregister']
class SemaphoreTracker(object):
def __init__(self):
self._lock = threading.Lock()
self._fd = None
self._pid = None
def getfd(self):
self.ensure_running()
return self._fd
def ensure_running(self):
'''Make sure that semaphore tracker process is running.
This can be run from any process. Usually a child process will use
the semaphore created by its parent.'''
with self._lock:
if self._pid is not None:
# semaphore tracker was launched before, is it still running?
pid, status = os.waitpid(self._pid, os.WNOHANG)
if not pid:
# => still alive
return
# => dead, launch it again
os.close(self._fd)
self._fd = None
self._pid = None
warnings.warn('semaphore_tracker: process died unexpectedly, '
'relaunching. Some semaphores might leak.')
fds_to_pass = []
try:
fds_to_pass.append(sys.stderr.fileno())
except Exception:
pass
cmd = 'from multiprocessing.semaphore_tracker import main;main(%d)'
r, w = os.pipe()
try:
fds_to_pass.append(r)
# process will out live us, so no need to wait on pid
exe = spawn.get_executable()
args = [exe] + util._args_from_interpreter_flags()
args += ['-c', cmd % r]
pid = util.spawnv_passfds(exe, args, fds_to_pass)
except:
os.close(w)
raise
else:
self._fd = w
self._pid = pid
finally:
os.close(r)
def register(self, name):
'''Register name of semaphore with semaphore tracker.'''
self._send('REGISTER', name)
def unregister(self, name):
'''Unregister name of semaphore with semaphore tracker.'''
self._send('UNREGISTER', name)
def _send(self, cmd, name):
self.ensure_running()
msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
if len(name) > 512:
# posix guarantees that writes to a pipe of less than PIPE_BUF
# bytes are atomic, and that PIPE_BUF >= 512
raise ValueError('name too long')
nbytes = os.write(self._fd, msg)
assert nbytes == len(msg)
_semaphore_tracker = SemaphoreTracker()
ensure_running = _semaphore_tracker.ensure_running
register = _semaphore_tracker.register
unregister = _semaphore_tracker.unregister
getfd = _semaphore_tracker.getfd
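# Editor's sketch (not part of the module): typical use of the module-level
# helpers above; the semaphore name is hypothetical.
#
#   from multiprocessing import semaphore_tracker
#   semaphore_tracker.ensure_running()
#   semaphore_tracker.register('/mp-hypothetical-sem')
#   # ... use the named semaphore ...
#   semaphore_tracker.unregister('/mp-hypothetical-sem')
#
# Each call is sent to the tracker process as a "REGISTER:<name>\n" or
# "UNREGISTER:<name>\n" line over the pipe that main() below reads.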
def main(fd):
'''Run semaphore tracker.'''
# protect the process from ^C and "killall python" etc
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
for f in (sys.stdin, sys.stdout):
try:
f.close()
except Exception:
pass
cache = set()
try:
# keep track of registered/unregistered semaphores
with open(fd, 'rb') as f:
for line in f:
try:
cmd, name = line.strip().split(b':')
if cmd == b'REGISTER':
cache.add(name)
elif cmd == b'UNREGISTER':
cache.remove(name)
else:
raise RuntimeError('unrecognized command %r' % cmd)
except Exception:
try:
sys.excepthook(*sys.exc_info())
except:
pass
finally:
# all processes have terminated; cleanup any remaining semaphores
if cache:
try:
warnings.warn('semaphore_tracker: There appear to be %d '
'leaked semaphores to clean up at shutdown' %
len(cache))
except Exception:
pass
for name in cache:
# For some reason the process which created and registered this
# semaphore has failed to unregister it. Presumably it has died.
# We therefore unlink it.
try:
name = name.decode('ascii')
try:
_multiprocessing.sem_unlink(name)
except Exception as e:
warnings.warn('semaphore_tracker: %r: %s' % (name, e))
finally:
pass
| lgpl-2.1 |
drybjed/ansible-modules-extras | cloud/cloudstack/cs_account.py | 14 | 12533 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_account
short_description: Manages accounts on Apache CloudStack based clouds.
description:
- Create, disable, lock, enable and remove accounts.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of account.
required: true
username:
description:
- Username of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
password:
description:
- Password of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
first_name:
description:
- First name of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
last_name:
description:
- Last name of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
email:
description:
- Email of the user to be created if account did not exist.
- Required on C(state=present).
required: false
default: null
timezone:
description:
- Timezone of the user to be created if account did not exist.
required: false
default: null
network_domain:
description:
- Network domain of the account.
required: false
default: null
account_type:
description:
- Type of the account.
required: false
default: 'user'
choices: [ 'user', 'root_admin', 'domain_admin' ]
domain:
description:
- Domain the account is related to.
required: false
default: 'ROOT'
state:
description:
- State of the account.
required: false
default: 'present'
choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked' ]
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create an account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
username: customer_xy
password: S3Cur3
last_name: Doe
first_name: John
email: [email protected]
domain: CUSTOMERS
# Lock an existing account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
domain: CUSTOMERS
state: locked
# Disable an existing account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
domain: CUSTOMERS
state: disabled
# Enable an existing account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
domain: CUSTOMERS
state: enabled
# Remove an account in domain 'CUSTOMERS'
local_action:
module: cs_account
name: customer_xy
domain: CUSTOMERS
state: absent
'''
RETURN = '''
---
name:
description: Name of the account.
returned: success
type: string
sample: [email protected]
account_type:
description: Type of the account.
returned: success
type: string
sample: user
account_state:
description: State of the account.
returned: success
type: string
sample: enabled
network_domain:
description: Network domain of the account.
returned: success
type: string
sample: example.local
domain:
  description: Domain the account is related to.
returned: success
type: string
sample: ROOT
'''
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackAccount(AnsibleCloudStack):
def __init__(self, module):
AnsibleCloudStack.__init__(self, module)
self.account = None
self.account_types = {
'user': 0,
'root_admin': 1,
'domain_admin': 2,
}
def get_account_type(self):
account_type = self.module.params.get('account_type')
return self.account_types[account_type]
def get_account(self):
if not self.account:
args = {}
args['listall'] = True
args['domainid'] = self.get_domain('id')
accounts = self.cs.listAccounts(**args)
if accounts:
account_name = self.module.params.get('name')
for a in accounts['account']:
if account_name in [ a['name'] ]:
self.account = a
break
return self.account
def enable_account(self):
account = self.get_account()
if not account:
self.module.fail_json(msg="Failed: account not present")
if account['state'].lower() != 'enabled':
self.result['changed'] = True
args = {}
args['id'] = account['id']
args['account'] = self.module.params.get('name')
args['domainid'] = self.get_domain('id')
if not self.module.check_mode:
res = self.cs.enableAccount(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
account = res['account']
return account
def lock_account(self):
return self.lock_or_disable_account(lock=True)
def disable_account(self):
return self.lock_or_disable_account()
def lock_or_disable_account(self, lock=False):
account = self.get_account()
if not account:
self.module.fail_json(msg="Failed: account not present")
# we need to enable the account to lock it.
if lock and account['state'].lower() == 'disabled':
account = self.enable_account()
if lock and account['state'].lower() != 'locked' \
or not lock and account['state'].lower() != 'disabled':
self.result['changed'] = True
args = {}
args['id'] = account['id']
args['account'] = self.module.params.get('name')
args['domainid'] = self.get_domain('id')
args['lock'] = lock
if not self.module.check_mode:
account = self.cs.disableAccount(**args)
if 'errortext' in account:
self.module.fail_json(msg="Failed: '%s'" % account['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
account = self._poll_job(account, 'account')
return account
def present_account(self):
missing_params = []
if not self.module.params.get('email'):
missing_params.append('email')
if not self.module.params.get('username'):
missing_params.append('username')
if not self.module.params.get('password'):
missing_params.append('password')
if not self.module.params.get('first_name'):
missing_params.append('first_name')
if not self.module.params.get('last_name'):
missing_params.append('last_name')
if missing_params:
self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
account = self.get_account()
if not account:
self.result['changed'] = True
args = {}
args['account'] = self.module.params.get('name')
args['domainid'] = self.get_domain('id')
args['accounttype'] = self.get_account_type()
args['networkdomain'] = self.module.params.get('network_domain')
args['username'] = self.module.params.get('username')
args['password'] = self.module.params.get('password')
args['firstname'] = self.module.params.get('first_name')
args['lastname'] = self.module.params.get('last_name')
args['email'] = self.module.params.get('email')
args['timezone'] = self.module.params.get('timezone')
if not self.module.check_mode:
res = self.cs.createAccount(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
account = res['account']
return account
def absent_account(self):
account = self.get_account()
if account:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.deleteAccount(id=account['id'])
if 'errortext' in account:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
res = self._poll_job(res, 'account')
return account
def get_result(self, account):
if account:
if 'name' in account:
self.result['name'] = account['name']
if 'accounttype' in account:
for key,value in self.account_types.items():
if value == account['accounttype']:
self.result['account_type'] = key
break
if 'state' in account:
self.result['account_state'] = account['state']
if 'domain' in account:
self.result['domain'] = account['domain']
if 'networkdomain' in account:
self.result['network_domain'] = account['networkdomain']
return self.result
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked' ], default='present'),
account_type = dict(choices=['user', 'root_admin', 'domain_admin'], default='user'),
network_domain = dict(default=None),
domain = dict(default='ROOT'),
email = dict(default=None),
first_name = dict(default=None),
last_name = dict(default=None),
username = dict(default=None),
password = dict(default=None),
timezone = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
api_key = dict(default=None),
api_secret = dict(default=None, no_log=True),
api_url = dict(default=None),
api_http_method = dict(choices=['get', 'post'], default='get'),
api_timeout = dict(type='int', default=10),
),
required_together = (
['api_key', 'api_secret', 'api_url'],
),
supports_check_mode=True
)
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
try:
acs_acc = AnsibleCloudStackAccount(module)
state = module.params.get('state')
if state in ['absent']:
account = acs_acc.absent_account()
elif state in ['enabled']:
account = acs_acc.enable_account()
elif state in ['disabled']:
account = acs_acc.disable_account()
elif state in ['locked']:
account = acs_acc.lock_account()
else:
account = acs_acc.present_account()
result = acs_acc.get_result(account)
except CloudStackException, e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
DFO-Ocean-Navigator/Ocean-Data-Map-Project | scripts/drifter_process.py | 1 | 11800 | #!env python
'''
Reads in the raw data from the Joubeh ftp site and compiles the separate files
into one file per drifter
'''
import os
import re
import pandas as pd
import numpy as np
from netCDF4 import Dataset
from datetime import datetime
import time
import geopy
import geopy.distance
from scipy import interpolate
import sys
import scipy
import scipy.fftpack
dirname = "/data/drifter/raw/"
shareddir = "/data/drifter/output/"
metadatafile = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"drifter_metadata.xlsx"
)
def appendDateVariable(dataset, dataframe, name, variable):
'''
Appends a date/time type variable from a dataframe to a NetCDF dataset.
Parameters:
dataset - the NetCDF Dataset
dataframe - the Pandas Dataframe
name - the NetCDF name of the variable
variable - the column name of the variable in the dataframe
'''
var = dataset.createVariable(name, 'f4', ('data_date',))
var.units = "seconds since 1950-01-01 00:00:00"
var.calendar = "gregorian"
var.time_origin = "1950-01-01 00:00:00"
    origin = datetime(1950, 1, 1, 0, 0, 0)
var[:] = [(t - origin).total_seconds() for t in
dataframe[variable].tolist()]
def appendVariable(dataset, dataframe, variable, datatype, units=None,
long_name=None):
'''
Appends a variable from a dataframe to a NetCDF dataset.
Parameters:
dataset - the NetCDF Dataset
dataframe - the Pandas Dataframe
variable - the column name of the variable in the dataframe
datatype - the data type in NetCDF format
units - the units of the variable
long_name - the variable's human readable name
'''
if variable in dataframe.columns:
var = dataset.createVariable(
variable.lower(), datatype, ('data_date',))
var[:] = dataframe[variable].values
if units:
var.units = units
if long_name:
var.long_name = long_name
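# Editor's sketch (not part of the original script): how the two helpers above
# are typically called for one drifter; `ds` is an open netCDF4 Dataset and
# `dataframe` holds the merged Joubeh CSV columns used later in this file.
#
#   appendDateVariable(ds, dataframe, 'data_date', 'Data Date(GMT)')
#   appendVariable(ds, dataframe, 'SST', 'f4',
#                  units='degree_Celsius', long_name='Sea Surface Temperature')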
def bearing(source, destination):
l1 = np.radians(source.longitude)
l2 = np.radians(destination.longitude)
dl = l2 - l1
p1 = np.radians(source.latitude)
p2 = np.radians(destination.latitude)
y = np.sin(dl) * np.cos(p2)
x = np.cos(p1) * np.sin(p2) - np.sin(p1) * np.cos(p2) * np.cos(dl)
return np.pi / 2 - np.arctan2(y, x)
# clear out the directory where the merged files will be created
if os.path.isdir(shareddir) == 1:
csvfilelist = [f for f in os.listdir(shareddir) if f.endswith(".csv")]
ncfilelist = [f for f in os.listdir(shareddir) if f.endswith(".nc")]
for f in csvfilelist + ncfilelist:
os.remove(os.path.join(shareddir, f))
# Read the metadata from the Excel sheet
metadata = pd.read_excel(metadatafile, skiprows=3, index_col=1)
buoy_files = {}
# For each directory
for d in os.listdir(dirname):
for f in os.listdir(os.path.join(dirname, d)):
if not f.endswith(".csv"):
continue
buoy_id = re.split('_', f)[0]
# Add a dummy entry in buoy_files
if buoy_id not in buoy_files:
buoy_files[buoy_id] = []
buoy_files[buoy_id].append(os.path.join(dirname, d, f))
# For each buoy
for buoy_id, files in buoy_files.items():
if len(sys.argv) > 1:
if buoy_id not in sys.argv[1::]:
continue
# Clear the list
dataframes = []
# Read each CSV file into a dataframe and add it to the list
for f in files:
dataframes.append(pd.read_csv(f))
# Concatenate all the dataframes together
dataframe = pd.concat(dataframes).sort_values('Data Date(GMT)')
# We don't need the individual dataframes anymore, release that memory
dataframes = None
# Convert dates to datetime objects
for col in ['Data Date(GMT)', 'Received Date(GMT)', 'Sent Date(GMT)']:
dataframe[col] = pd.to_datetime(dataframe[col],
format='%Y-%m-%d %H:%M:%S')
dataframe.drop_duplicates(subset='Data Date(GMT)', inplace=True)
'''
Any QC of the data should be done here.
'''
changed = True
std = None
mean = None
while changed:
dataframe.reset_index(inplace=True)
dataframe.drop('index', axis=1, inplace=True)
# Get geopy points for each lat,lon pair
points = dataframe[
['LATITUDE', 'LONGITUDE']
].apply(lambda x: geopy.Point(x[0], x[1]), axis=1)
# get distances in nautical miles
ends = points.shift(1)
distances = []
for idx, start in enumerate(points):
distances.append(geopy.distance.vincenty(start, ends[idx]).nm)
distances = np.ma.masked_invalid(distances)
# get time differences in hours
times = dataframe['Data Date(GMT)'].diff().apply(
lambda x: x.total_seconds() / 3600.0
)
# Speed in knots
speed = distances / times
# Drop anything where the speed is 2 standard deviations from the mean
if std is None:
std = np.std(speed)
mean = np.mean(speed)
si = np.where((abs(speed - mean) > 3 * std) & (speed > 10))[0]
if len(si) > 0:
dataframe.drop(points.index[si[0]], inplace=True)
print "\tDropping point with speed=%0.1f knots" % speed[si[0]]
else:
changed = False
del si
'''
QA is now done, back to our regularly scheduled programming.
'''
# Calculate speeds
start = dataframe[
['LATITUDE', 'LONGITUDE']
].apply(lambda x: geopy.Point(x[0], x[1]), axis=1)
# get distances in nautical miles
ends = start.shift(-1)
dx = []
dy = []
for idx in range(0, len(start) - 1):
d = geopy.distance.vincenty(start[idx], ends[idx]).meters
b = bearing(start[idx], ends[idx])
dy.append(np.sin(b) * d)
dx.append(np.cos(b) * d)
dt = dataframe['Data Date(GMT)'].diff()
times = dt.apply(
lambda x: x.total_seconds()
)
vx = dx / times[1::]
vy = dy / times[1::]
vt = (dataframe['Data Date(GMT)'] + dt / 2)[1::].apply(
lambda x: time.mktime(x.timetuple())
)
    if vt.size <= 2 or vx.size <= 2 or vy.size <= 2:
        print "vt, vx, or vy is too small to use; need more than 2 velocity samples (the drifter should have more than a couple of points)"
continue
fx = interpolate.interp1d(vt, vx, bounds_error=False, kind='linear')
fy = interpolate.interp1d(vt, vy, bounds_error=False, kind='linear')
target_times = dataframe['Data Date(GMT)'].apply(
lambda x: time.mktime(x.timetuple())
)
dataframe['VX'] = pd.Series(
fx(target_times),
index=dataframe.index
)
dataframe['VY'] = pd.Series(
fy(target_times),
index=dataframe.index
)
# Smooth the velocities
even_times = np.arange(
vt.iloc[0],
vt.iloc[-1],
3600.0
)
for v in [(fx, 'VX_smooth'), (fy, 'VY_smooth')]:
ve = np.ma.masked_invalid(v[0](even_times))
slices = np.ma.notmasked_contiguous(ve)
valid = slices[0]
sig = ve[valid]
t = even_times[valid]
M = sig.size
spectrum = scipy.fftpack.rfft(sig, n=M)
p1 = (30.0 / 24.0) / 24.0 * M / 2
p2 = (40.0 / 24.0) / 24.0 * M / 2
def ramp_filter(x):
if x <= p1:
return 1.0
elif x >= p2:
return 0.0
else:
return (x - p1) / (p2 - p1)
filtered_spec = [spectrum[i] * ramp_filter(i) for i in xrange(M)]
output = scipy.fftpack.irfft(filtered_spec)
f = scipy.interpolate.interp1d(
t, output, bounds_error=False, kind='linear'
)
fo = f(dataframe['Data Date(GMT)'].apply(
lambda x: time.mktime(x.timetuple())
))
dataframe[v[1]] = pd.Series(
fo,
index=dataframe.index
)
# Remove the hex data from the dataframe
if 'Hex Data' in dataframe.columns:
dataframe.drop('Hex Data', axis=1, inplace=True)
# Output File Paths
csv_path = os.path.join(shareddir, buoy_id + ".csv")
netcdf_path = os.path.join(shareddir, buoy_id + ".nc")
# Write out CSV file
dataframe.to_csv(csv_path, index=False, date_format='%Y-%m-%d %H:%M:%S')
# Write out NetCDF file
with Dataset(netcdf_path, "w", format='NETCDF4') as ds:
ds.createDimension('data_date', len(dataframe)) # Number of rows
ds.createDimension('meta', 1)
ds.buoyid = buoy_id
ds.description = 'CONCEPTS Ocean Drifter %s' % (buoy_id)
wmo = metadata['WMO ID'][int(buoy_id)]
if isinstance(wmo, pd.Series):
wmo = wmo.iloc[wmo.size - 1]
if isinstance(wmo, basestring):
wmo = wmo.strip()
ds.createVariable('wmo', str, ('meta',))[0] = str(wmo)
deployment = metadata['Dep. Type(ship name, cruise)'][int(buoy_id)]
if isinstance(deployment, pd.Series):
deployment = deployment.iloc[deployment.size - 1]
ds.createVariable('deployment', str, ('meta',))[0] = str(deployment)
ds.createVariable('imei', str, ('meta',))[0] = str(buoy_id)
endtime = dataframe['Data Date(GMT)'].tail(1).tolist()[0]
delta = (datetime.utcnow() - endtime).total_seconds() / 3600.0
if delta > 168:
ds.status = 'inactive'
elif delta > 24:
ds.status = 'not responding'
else:
ds.status = 'normal'
appendDateVariable(
ds, dataframe, 'data_date', 'Data Date(GMT)'
)
appendDateVariable(
ds, dataframe, 'received_date', 'Received Date(GMT)'
)
appendDateVariable(
ds, dataframe, 'sent_date', 'Sent Date(GMT)'
)
appendVariable(
ds, dataframe, "LATITUDE", 'f4', 'degrees_north', 'Latitude'
)
appendVariable(
ds, dataframe, "LONGITUDE", 'f4', 'degrees_east', 'Longitude'
)
appendVariable(
ds, dataframe,
"SST", 'f4', 'degree_Celsius', 'Sea Surface Temperature'
)
appendVariable(
ds, dataframe, "SBDTIME", 'i4', 'Seconds',
'Time for Iridium message transmission'
)
appendVariable(
ds, dataframe, "VBAT", 'f4', 'Volts', 'Battery Voltage'
)
appendVariable(
ds, dataframe, "TTFF", 'i4', 'Seconds', 'Time to first GPS fix'
)
appendVariable(
ds, dataframe, "FOM", 'i4'
)
appendVariable(
ds, dataframe, "MAXDB", 'i4'
)
appendVariable(
ds, dataframe, "AT", 'i4'
)
appendVariable(
ds, dataframe, "BP", 'f4', 'mbar', 'Barometric Pressure'
)
appendVariable(
ds, dataframe, "BPT", 'f4', 'mbar', 'Barometric Pressure Tendency'
)
appendVariable(
ds, dataframe, "RANGE", 'i4', None,
'Drogue Loss Strain Gauge Sensor Data'
)
appendVariable(
ds, dataframe, "GPSDELAY", 'i4', 'Seconds', 'GPS Delay'
)
appendVariable(
ds, dataframe, "SNR", 'i4', None, 'GPS Signal Strength'
)
appendVariable(
ds, dataframe, "VX", 'f4', 'm/s', 'Drifter X Velocity'
)
appendVariable(
ds, dataframe, "VY", 'f4', 'm/s', 'Drifter Y Velocity'
)
appendVariable(
ds, dataframe, "VX_smooth", 'f4', 'm/s',
'Drifter X Velocity (filtered)'
)
appendVariable(
ds, dataframe, "VY_smooth", 'f4', 'm/s',
'Drifter Y Velocity (filtered)'
)
| gpl-3.0 |
undoware/neutron-drive | neutron-drive/django/contrib/staticfiles/handlers.py | 85 | 2316 | import urllib
from urlparse import urlparse
from django.conf import settings
from django.core.handlers.wsgi import WSGIHandler
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.views import serve
class StaticFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to the static files directory, as
defined by the STATIC_URL setting, and serves those files.
"""
def __init__(self, application, base_dir=None):
self.application = application
if base_dir:
self.base_dir = base_dir
else:
self.base_dir = self.get_base_dir()
self.base_url = urlparse(self.get_base_url())
super(StaticFilesHandler, self).__init__()
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
utils.check_settings()
return settings.STATIC_URL
def _should_handle(self, path):
"""
Checks if the path should be handled. Ignores the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""
Returns the relative path to the media file on disk for the given URL.
"""
relative_url = url[len(self.base_url[2]):]
return urllib.url2pathname(relative_url)
def serve(self, request):
"""
Actually serves the request path.
"""
return serve(request, self.file_path(request.path), insecure=True)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404, e:
if settings.DEBUG:
from django.views import debug
return debug.technical_404_response(request, e)
return super(StaticFilesHandler, self).get_response(request)
def __call__(self, environ, start_response):
if not self._should_handle(environ['PATH_INFO']):
return self.application(environ, start_response)
return super(StaticFilesHandler, self).__call__(environ, start_response)
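# Editor's sketch (not part of Django): wrapping a project's WSGI application
# for development-time static file serving; get_wsgi_application is Django's
# standard WSGI factory.
#
#   from django.core.wsgi import get_wsgi_application
#   from django.contrib.staticfiles.handlers import StaticFilesHandler
#
#   application = StaticFilesHandler(get_wsgi_application())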
| bsd-3-clause |
Endika/sale-workflow | sale_payment_term_interest/__openerp__.py | 15 | 1227 | # -*- coding: utf-8 -*-
#
#
# Authors: Guewen Baconnier
# Copyright 2015 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
{'name': 'Sales Payment Term Interests',
'version': '8.0.1.0.0',
'author': 'Camptocamp,Odoo Community Association (OCA)',
'license': 'AGPL-3',
'category': 'Accounting & Finance',
'depends': ['sale'],
'website': 'http://www.camptocamp.com',
'data': ['data/product_data.xml',
'view/sale_order_view.xml',
'view/account_payment_term_view.xml',
],
'test': [],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
wenkaiqiu/petal | uniform_model/templates/template_manager.py | 1 | 2524 | import logging
from itertools import chain
from uniform_model.templates.other import OtherTemplate
from uniform_model.templates.processor import ProcessorTemplate
from uniform_model.templates.storage import StorageTemplate
from .chassis import ChassisTemplate
from .switch import SwitchTemplate
logging.basicConfig(format='%(asctime)s <%(name)s> %(message)s')
logger = logging.getLogger('uniform_model.templates.template_manager')
logger.setLevel(logging.DEBUG)
# Register the available device model template classes
__global_register = {
'chasis': ChassisTemplate,
'switch': SwitchTemplate,
'processor': ProcessorTemplate,
'other': OtherTemplate,
'storage': StorageTemplate
}
# Look up a model template class by type; return None if it is not registered
def _get_model_type(model_type):
global __global_register
return __global_register.get(model_type, None)
class TemplateManager:
"""
    Instantiates model templates and manages the registered template instances.
"""
__registered_models = {}
@classmethod
def _check_info(cls, model_info):
if "model_type" not in model_info:
raise NameError("<model_type> is needed in model_info")
model_type = model_info["model_type"]
# for item in chain(cls.conf["base"], cls.conf[model_type]):
# if item not in model_info:
# raise NameError(f"<{item}> is needed in model_info")
@classmethod
def set_conf(cls, conf):
logger.info("<ModelManager> set_conf")
cls.conf = {"base": filter(lambda x: conf[x], conf)}
for item in filter(lambda x: x.startswith("other"), conf):
cls.conf.update({item.split("_")[1]: filter(lambda x: conf[item][x], conf[item])})
@classmethod
def register_model(cls, model_info):
try:
cls._check_info(model_info)
            logger.info(f"model <{model_info['name']}> checked successfully")
except NameError as e:
print(e)
model_type = model_info["model_type"]
model = _get_model_type(model_type)(**model_info)
        # Check whether a template with this name has already been registered
if cls.get_model(model.name) is not None:
raise ValueError(f'model <{model.name}> already registered.')
cls.__registered_models.update({model.name: model})
logger.info(f'register model <{model.name}> success')
@classmethod
def list_all_registered(cls):
return cls.__registered_models
@classmethod
def get_model(cls, model_name):
return cls.__registered_models.get(model_name, None)
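# Editor's sketch (not part of the module): registering and retrieving a
# template. Keys other than "model_type" are hypothetical and depend on the
# concrete template class being instantiated.
#
#   info = {"model_type": "switch", "name": "tor-switch-48p"}
#   TemplateManager.register_model(info)
#   tmpl = TemplateManager.get_model("tor-switch-48p")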
| bsd-3-clause |
roofit-dev/parallel-roofit-scripts | profiling/vincemark/analyze_c.py | 1 | 11894 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Patrick Bos
# @Date: 2016-11-16 16:23:55
# @Last Modified by: E. G. Patrick Bos
# @Last Modified time: 2017-06-28 14:49:23
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from pathlib import Path
import itertools
import load_timing
pd.set_option("display.width", None)
def savefig(factorplot, fp):
    try:
        factorplot.savefig(fp)
        print("saved figure using pathlib.Path, apparently mpl is now pep 519 compatible! https://github.com/matplotlib/matplotlib/pull/8481")
    except TypeError:
        factorplot.savefig(fp.__str__())
"""
cd ~/projects/apcocsm/code/profiling/vincemark
rsync --progress --include='*/' --include='*/*/' --include='timing*.json' --exclude='*' -zavr nikhef:project_atlas/apcocsm_code/profiling/vincemark/vincemark_c ./
"""
basepath = Path.home() / 'projects/apcocsm/code/profiling/vincemark/vincemark_c'
savefig_dn = basepath / 'analysis'
savefig_dn.mkdir(parents=True, exist_ok=True)
#### LOAD DATA FROM FILES
fpgloblist = [basepath.glob('%i.allier.nikhef.nl/*.json' % i)
for i in range(18551136, 18551255)]
# for i in itertools.chain(range(18445438, 18445581),
# range(18366732, 18367027))]
drop_meta = ['parallel_interleave', 'seed', 'print_level', 'timing_flag',
'optConst', 'workspace_filepath', 'time_num_ints']
skip_on_match = ['timing_RRMPFE_serverloop_p*.json', # skip timing_flag 8 output (contains no data)
]
if Path('df_numints.hdf').exists():
skip_on_match.append('timings_numInts.json')
dfs_sp, dfs_mp_sl, dfs_mp_ma = load_timing.load_dfs_coresplit(fpgloblist, skip_on_match=skip_on_match, drop_meta=drop_meta)
# #### TOTAL TIMINGS (flag 1)
df_totals_real = pd.concat([dfs_sp['full_minimize'], dfs_mp_ma['full_minimize']])
# ### ADD IDEAL TIMING BASED ON SINGLE CORE RUNS
df_totals_ideal = load_timing.estimate_ideal_timing(df_totals_real, groupby=['N_events', 'segment',
'N_chans', 'N_nuisance_parameters', 'N_bins'],
time_col='walltime_s')
df_totals = load_timing.combine_ideal_and_real(df_totals_real, df_totals_ideal)
# remove summed timings, they show nothing new
df_totals = df_totals[df_totals.segment != 'migrad+hesse+minos']
# # add combination of two categories
# df_totals['timeNIs/Nevents'] = df_totals.time_num_ints.astype(str) + '/' + df_totals.N_events.astype(str)
# df_totals['timeNIs/Nbins'] = df_totals.time_num_ints.astype(str) + '/' + df_totals.N_bins.astype(str)
# df_totals['timeNIs/Nnps'] = df_totals.time_num_ints.astype(str) + '/' + df_totals.N_nuisance_parameters.astype(str)
# df_totals['timeNIs/Nchans'] = df_totals.time_num_ints.astype(str) + '/' + df_totals.N_chans.astype(str)
#### ANALYSIS
# full timings
g = sns.factorplot(x='num_cpu', y='walltime_s', col='N_bins', hue='timing_type', row='segment', estimator=np.min, data=df_totals, legend_out=False, sharey='row')
plt.subplots_adjust(top=0.93)
g.fig.suptitle(f'total wallclock timing of migrad, hesse and minos')
savefig(g, savefig_dn / f'total_timing.png')
for chans in df_totals.N_chans.unique():
for events in df_totals.N_events.unique():
for nps in df_totals.N_nuisance_parameters.unique():
data = df_totals[(df_totals.N_chans == chans) & (df_totals.N_events == events) & (df_totals.N_nuisance_parameters == nps)]
if len(data) > 0:
g = sns.factorplot(x='num_cpu', y='walltime_s', col='N_bins', hue='timing_type', row='segment', estimator=np.min, data=data, legend_out=False, sharey='row')
plt.subplots_adjust(top=0.93)
g.fig.suptitle(f'total wallclock timing of migrad, hesse and minos --- N_channels = {chans}, N_events = {events}, N_nps = {nps}')
savefig(g, savefig_dn / f'total_timing_chan{chans}_event{events}_np{nps}.png')
# some more, focused on different parameters based on analysis of above plots
# scale with Nnps
g = sns.factorplot(x='num_cpu', y='walltime_s', col='N_nuisance_parameters', hue='timing_type', row='segment', estimator=np.min, data=df_totals, legend_out=False, sharey='row')
plt.subplots_adjust(top=0.93)
g.fig.suptitle(f'total wallclock timing of migrad, hesse and minos')
savefig(g, savefig_dn / f'total_timing_col-Nnps.png')
# scale with Nchans
g = sns.factorplot(x='num_cpu', y='walltime_s', col='N_chans', hue='timing_type', row='segment', estimator=np.min, data=df_totals, legend_out=False, sharey='row')
plt.subplots_adjust(top=0.93)
g.fig.suptitle(f'total wallclock timing of migrad, hesse and minos')
savefig(g, savefig_dn / f'total_timing_col-Nchans.png')
print("Something is not going right with the numerical integral added iteration columns... are they structured the way I thought at all?")
raise SystemExit
#### NUMERICAL INTEGRAL TIMINGS
if not Path('df_numints.hdf').exists():
df_numints = dfs_mp_sl['numInts']
df_numints.to_hdf('df_numints.hdf', 'vincemark_a_numint_timings')
else:
print("loading numerical integral timings from HDF file...")
df_numints = pd.read_hdf('df_numints.hdf', 'vincemark_a_numint_timings')
print("...done")
load_timing.add_iteration_column(df_numints)
df_numints_min_by_iteration = df_numints.groupby('iteration').min()
df_numints_max_by_iteration = df_numints.groupby('iteration').max()
"""
#### RooRealMPFE TIMINGS
### MPFE evaluate @ client (single core) (flags 5 and 6)
mpfe_eval = pd.concat([v for k, v in dfs_mp_ma.items() if 'wall_RRMPFE_evaluate_client' in k] +
[v for k, v in dfs_mp_ma.items() if 'cpu_RRMPFE_evaluate_client' in k])
### add MPFE evaluate full timings (flag 4)
mpfe_eval_full = pd.concat([v for k, v in dfs_mp_ma.items() if 'RRMPFE_evaluate_full' in k])
mpfe_eval_full.rename(columns={'RRMPFE_evaluate_wall_s': 'time s'}, inplace=True)
mpfe_eval_full['cpu/wall'] = 'wall+INLINE'
mpfe_eval_full['segment'] = 'all'
mpfe_eval = mpfe_eval.append(mpfe_eval_full)
### total time per run (== per pid, but the other columns are also grouped-by to prevent from summing over them)
mpfe_eval_total = mpfe_eval.groupby(['pid', 'N_events', 'num_cpu', 'cpu/wall', 'segment', 'force_num_int'], as_index=False).sum()
#### ADD mpfe_eval COLUMN OF CPU_ID, ***PROBABLY***, WHICH SEEMS TO EXPLAIN DIFFERENT TIMINGS QUITE WELL
mpfe_eval_cpu_split = pd.DataFrame(columns=mpfe_eval.columns)
for num_cpu in range(2, 9):
mpfe_eval_num_cpu = mpfe_eval[(mpfe_eval.segment == 'all') * (mpfe_eval.num_cpu == num_cpu)]
mpfe_eval_num_cpu['cpu_id'] = None
for cpu_id in range(num_cpu):
mpfe_eval_num_cpu.iloc[cpu_id::num_cpu, mpfe_eval_num_cpu.columns.get_loc('cpu_id')] = cpu_id
mpfe_eval_cpu_split = mpfe_eval_cpu_split.append(mpfe_eval_num_cpu)
mpfe_eval_cpu_split_total = mpfe_eval_cpu_split.groupby(['pid', 'N_events', 'num_cpu', 'cpu/wall', 'segment', 'cpu_id', 'force_num_int'], as_index=False).sum()
### MPFE calculate
mpfe_calc = pd.concat([v for k, v in dfs_mp_ma.items() if 'RRMPFE_calculate_initialize' in k])
mpfe_calc.rename(columns={'RRMPFE_calculate_initialize_wall_s': 'walltime s'}, inplace=True)
mpfe_calc_total = mpfe_calc.groupby(['pid', 'N_events', 'num_cpu', 'force_num_int'], as_index=False).sum()
#### RooAbsTestStatistic TIMINGS
### RATS evaluate full (flag 2)
rats_eval_sp = dfs_sp['RATS_evaluate_full'].dropna()
rats_eval_ma = dfs_mp_ma['RATS_evaluate_full'].dropna()
# rats_eval_sl is not really a multi-process result, it is just the single process runs (the ppid output in RooFit is now set to -1 if it is not really a slave, for later runs)
# rats_eval_sl = dfs_mp_sl['RATS_evaluate_full'].dropna()
rats_eval = pd.concat([rats_eval_sp, rats_eval_ma])
rats_eval_total = rats_eval.groupby(['pid', 'N_events', 'num_cpu', 'mode', 'force_num_int'], as_index=False).sum()
### RATS evaluate per CPU iteration (multi-process only) (flag 3)
rats_eval_itcpu = rats_eval_itcpu_ma = dfs_mp_ma['RATS_evaluate_mpmaster_perCPU'].copy()
rats_eval_itcpu.rename(columns={'RATS_evaluate_mpmaster_it_wall_s': 'walltime s'}, inplace=True)
# rats_eval_itcpu is counted in the master process, the slaves do nothing (the ppid output is now removed from RooFit, for later runs)
# rats_eval_itcpu_sl = dfs_mp_sl['RATS_evaluate_mpmaster_perCPU']
rats_eval_itcpu_total = rats_eval_itcpu.groupby(['pid', 'N_events', 'num_cpu', 'it_nr', 'force_num_int'], as_index=False).sum()
"""
#### ANALYSIS
"""
# RATS evaluate full times
g = sns.factorplot(x='num_cpu', y='RATS_evaluate_wall_s', col='N_events', hue='mode', row='force_num_int', estimator=np.min, data=rats_eval_total, legend_out=False, sharey=False)
plt.subplots_adjust(top=0.85)
g.fig.suptitle('total wallclock timing of all calls to RATS::evaluate()')
savefig(g, savefig_dn / 'rats_eval.png')
# RATS evaluate itX times
g = sns.factorplot(x='num_cpu', y='walltime s', hue='it_nr', col='N_events', row='force_num_int', estimator=np.min, data=rats_eval_itcpu_total, legend_out=False, sharey=False)
plt.subplots_adjust(top=0.85)
g.fig.suptitle('total wallclock timing of the iterations of the main for-loop in RATS::evaluate()')
savefig(g, savefig_dn / 'rats_eval_itcpu.png')
# MPFE evaluate timings (including "collect" time)
for segment in mpfe_eval_total.segment.unique():
g = sns.factorplot(x='num_cpu', y='time s', hue='cpu/wall', col='N_events', row='force_num_int', estimator=np.min, data=mpfe_eval_total[mpfe_eval_total.segment == segment], legend_out=False, sharey=False)
plt.subplots_adjust(top=0.95)
g.fig.suptitle('total timings of all calls to RRMPFE::evaluate(); "COLLECT"')
savefig(g, savefig_dn / f'mpfe_eval_{segment}.png')
# ... split by cpu id
g = sns.factorplot(x='num_cpu', y='time s', hue='cpu_id', col='N_events', row='force_num_int', estimator=np.min, data=mpfe_eval_cpu_split_total[(mpfe_eval_cpu_split_total['cpu/wall'] == 'wall')], legend_out=False, sharey=False)
plt.subplots_adjust(top=0.85)
g.fig.suptitle('total wallclock timing of all calls to RRMPFE::evaluate(); only wallclock and only all-segment timings')
savefig(g, savefig_dn / f'mpfe_eval_cpu_split.png')
# MPFE calculate timings ("dispatch" time)
g = sns.factorplot(x='num_cpu', y='walltime s', col='N_events', row='force_num_int', sharey='row', estimator=np.min, data=mpfe_calc_total, legend_out=False)
plt.subplots_adjust(top=0.85)
g.fig.suptitle('total wallclock timing of all calls to RRMPFE::calculate(); "DISPATCH"')
savefig(g, savefig_dn / 'mpfe_calc.png')
"""
# numerical integrals
g = sns.factorplot(x='num_cpu', y='wall_s', col='N_events', sharey='row', row='time_num_ints/segment', estimator=np.min, data=df_numints, legend_out=False)
plt.subplots_adjust(top=0.85)
g.fig.suptitle('wallclock timing of all timed numerical integrals --- minima of all integrations per plotted factor --- vertical bars: variation in different runs and iterations')
savefig(g, savefig_dn / 'numInts_min.png')
g = sns.factorplot(x='num_cpu', y='wall_s', col='N_events', sharey='row', row='time_num_ints/segment', estimator=np.max, data=df_numints, legend_out=False)
plt.subplots_adjust(top=0.85)
g.fig.suptitle('wallclock timing of all timed numerical integrals --- maxima of all integrations per plotted factor --- vertical bars: variation in different runs and iterations')
savefig(g, savefig_dn / 'numInts_max.png')
g = sns.factorplot(x='num_cpu', y='wall_s', col='N_events', sharey='row', row='time_num_ints/segment', estimator=np.sum, data=df_numints_max_by_iteration, legend_out=False)
plt.subplots_adjust(top=0.8)
g.fig.suptitle(r'wallclock timing of all timed numerical integrals --- sum of maximum of each iteration per run $\sum_{\mathrm{it}} \max_{\mathrm{core}}(t_{\mathrm{run,it,core}})$ --- vertical bars: variation in different runs')
savefig(g, savefig_dn / 'numInts_it_sum_max.png')
plt.show()
| apache-2.0 |
fayf/pyload | module/plugins/PluginManager.py | 26 | 13427 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: mkaay, RaNaN
"""
import re
import sys
from os import listdir, makedirs
from os.path import isfile, join, exists, abspath
from sys import version_info
from itertools import chain
from traceback import print_exc
from module.lib.SafeEval import const_eval as literal_eval
from module.ConfigParser import IGNORE
class PluginManager:
ROOT = "module.plugins."
USERROOT = "userplugins."
TYPES = ("crypter", "container", "hoster", "captcha", "accounts", "hooks", "internal")
PATTERN = re.compile(r'__pattern__.*=.*r("|\')([^"\']+)')
VERSION = re.compile(r'__version__.*=.*("|\')([0-9.]+)')
CONFIG = re.compile(r'__config__.*=.*\[([^\]]+)', re.MULTILINE)
DESC = re.compile(r'__description__.?=.?("|"""|\')([^"\']+)')
def __init__(self, core):
self.core = core
#self.config = self.core.config
self.log = core.log
self.plugins = {}
self.createIndex()
#register for import hook
sys.meta_path.append(self)
def createIndex(self):
"""create information for all plugins available"""
sys.path.append(abspath(""))
if not exists("userplugins"):
makedirs("userplugins")
if not exists(join("userplugins", "__init__.py")):
f = open(join("userplugins", "__init__.py"), "wb")
f.close()
self.plugins["crypter"] = self.crypterPlugins = self.parse("crypter", pattern=True)
self.plugins["container"] = self.containerPlugins = self.parse("container", pattern=True)
self.plugins["hoster"] = self.hosterPlugins = self.parse("hoster", pattern=True)
self.plugins["captcha"] = self.captchaPlugins = self.parse("captcha")
self.plugins["accounts"] = self.accountPlugins = self.parse("accounts")
self.plugins["hooks"] = self.hookPlugins = self.parse("hooks")
self.plugins["internal"] = self.internalPlugins = self.parse("internal")
self.log.debug("created index of plugins")
def parse(self, folder, pattern=False, home={}):
"""
returns dict with information
home contains parsed plugins from module.
{
name : {path, version, config, (pattern, re), (plugin, class)}
}
"""
plugins = {}
if home:
pfolder = join("userplugins", folder)
if not exists(pfolder):
makedirs(pfolder)
if not exists(join(pfolder, "__init__.py")):
f = open(join(pfolder, "__init__.py"), "wb")
f.close()
else:
pfolder = join(pypath, "module", "plugins", folder)
for f in listdir(pfolder):
            if isfile(join(pfolder, f)) and (f.endswith(".py") or f.endswith("_25.pyc") or f.endswith(
                    "_26.pyc") or f.endswith("_27.pyc")) and not f.startswith("_"):
data = open(join(pfolder, f))
content = data.read()
data.close()
if f.endswith("_25.pyc") and version_info[0:2] != (2, 5):
continue
elif f.endswith("_26.pyc") and version_info[0:2] != (2, 6):
continue
elif f.endswith("_27.pyc") and version_info[0:2] != (2, 7):
continue
                name = f[:-3]  # drop ".py" (for ".pyc" this still leaves e.g. "_25.")
                if name[-1] == ".": name = name[:-4]  # strip the remaining "_2x." version suffix
version = self.VERSION.findall(content)
if version:
version = float(version[0][1])
else:
version = 0
# home contains plugins from pyload root
if home and name in home:
if home[name]["v"] >= version:
continue
if name in IGNORE or (folder, name) in IGNORE:
continue
plugins[name] = {}
plugins[name]["v"] = version
module = f.replace(".pyc", "").replace(".py", "")
# the plugin is loaded from user directory
plugins[name]["user"] = True if home else False
plugins[name]["name"] = module
if pattern:
pattern = self.PATTERN.findall(content)
if pattern:
pattern = pattern[0][1]
else:
pattern = "^unmachtable$"
plugins[name]["pattern"] = pattern
try:
plugins[name]["re"] = re.compile(pattern)
except:
self.log.error(_("%s has a invalid pattern.") % name)
# internals have no config
if folder == "internal":
self.core.config.deleteConfig(name)
continue
config = self.CONFIG.findall(content)
if config:
config = literal_eval(config[0].strip().replace("\n", "").replace("\r", ""))
desc = self.DESC.findall(content)
desc = desc[0][1] if desc else ""
if type(config[0]) == tuple:
config = [list(x) for x in config]
else:
config = [list(config)]
if folder == "hooks":
append = True
for item in config:
if item[0] == "activated": append = False
# activated flag missing
if append: config.append(["activated", "bool", "Activated", False])
try:
self.core.config.addPluginConfig(name, config, desc)
except:
self.log.error("Invalid config in %s: %s" % (name, config))
elif folder == "hooks": #force config creation
desc = self.DESC.findall(content)
desc = desc[0][1] if desc else ""
config = (["activated", "bool", "Activated", False],)
try:
self.core.config.addPluginConfig(name, config, desc)
except:
self.log.error("Invalid config in %s: %s" % (name, config))
if not home:
temp = self.parse(folder, pattern, plugins)
plugins.update(temp)
return plugins
def parseUrls(self, urls):
"""parse plugins for given list of urls"""
last = None
        res = [] # tuples of (url, plugin)
for url in urls:
if type(url) not in (str, unicode, buffer): continue
found = False
if last and last[1]["re"].match(url):
res.append((url, last[0]))
continue
for name, value in chain(self.crypterPlugins.iteritems(), self.hosterPlugins.iteritems(),
self.containerPlugins.iteritems()):
if value["re"].match(url):
res.append((url, name))
last = (name, value)
found = True
break
if not found:
res.append((url, "BasePlugin"))
return res
def findPlugin(self, name, pluginlist=("hoster", "crypter", "container")):
for ptype in pluginlist:
if name in self.plugins[ptype]:
return self.plugins[ptype][name], ptype
return None, None
def getPlugin(self, name, original=False):
"""return plugin module from hoster|decrypter|container"""
plugin, type = self.findPlugin(name)
if not plugin:
self.log.warning("Plugin %s not found." % name)
plugin = self.hosterPlugins["BasePlugin"]
if "new_module" in plugin and not original:
return plugin["new_module"]
return self.loadModule(type, name)
def getPluginName(self, name):
""" used to obtain new name if other plugin was injected"""
plugin, type = self.findPlugin(name)
if "new_name" in plugin:
return plugin["new_name"]
return name
def loadModule(self, type, name):
""" Returns loaded module for plugin
:param type: plugin type, subfolder of module.plugins
:param name:
"""
plugins = self.plugins[type]
if name in plugins:
if "module" in plugins[name]: return plugins[name]["module"]
try:
module = __import__(self.ROOT + "%s.%s" % (type, plugins[name]["name"]), globals(), locals(),
plugins[name]["name"])
plugins[name]["module"] = module #cache import, maybe unneeded
return module
except Exception, e:
self.log.error(_("Error importing %(name)s: %(msg)s") % {"name": name, "msg": str(e)})
if self.core.debug:
print_exc()
def loadClass(self, type, name):
"""Returns the class of a plugin with the same name"""
module = self.loadModule(type, name)
if module: return getattr(module, name)
def getAccountPlugins(self):
"""return list of account plugin names"""
return self.accountPlugins.keys()
def find_module(self, fullname, path=None):
        #redirecting imports if necessary
        if fullname.startswith(self.ROOT) or fullname.startswith(self.USERROOT): #separate pyload plugins
if fullname.startswith(self.USERROOT): user = 1
else: user = 0 #used as bool and int
split = fullname.split(".")
if len(split) != 4 - user: return
type, name = split[2 - user:4 - user]
if type in self.plugins and name in self.plugins[type]:
#userplugin is a newer version
if not user and self.plugins[type][name]["user"]:
return self
#imported from userdir, but pyloads is newer
if user and not self.plugins[type][name]["user"]:
return self
def load_module(self, name, replace=True):
if name not in sys.modules: #could be already in modules
if replace:
if self.ROOT in name:
newname = name.replace(self.ROOT, self.USERROOT)
else:
newname = name.replace(self.USERROOT, self.ROOT)
else: newname = name
base, plugin = newname.rsplit(".", 1)
self.log.debug("Redirected import %s -> %s" % (name, newname))
module = __import__(newname, globals(), locals(), [plugin])
            #inject under new and old name
sys.modules[name] = module
sys.modules[newname] = module
return sys.modules[name]
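    # Rough sketch of how the import hook above behaves (plugin name is made up):
    # with this manager on sys.meta_path, `import module.plugins.hoster.SomeHoster`
    # gets redirected to `userplugins.hoster.SomeHoster` when the user copy carries
    # the newer __version__ (and the other way around), and the loaded module is
    # then registered in sys.modules under both dotted names so that subsequent
    # imports of either path resolve to the same object.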
def reloadPlugins(self, type_plugins):
""" reloads and reindexes plugins """
if not type_plugins: return False
self.log.debug("Request reload of plugins: %s" % type_plugins)
as_dict = {}
for t,n in type_plugins:
if t in as_dict:
as_dict[t].append(n)
else:
as_dict[t] = [n]
        # we do not reload hooks or internals, that would cause too many side effects
if "hooks" in as_dict or "internal" in as_dict:
return False
for type in as_dict.iterkeys():
for plugin in as_dict[type]:
if plugin in self.plugins[type]:
if "module" in self.plugins[type][plugin]:
self.log.debug("Reloading %s" % plugin)
reload(self.plugins[type][plugin]["module"])
#index creation
self.plugins["crypter"] = self.crypterPlugins = self.parse("crypter", pattern=True)
self.plugins["container"] = self.containerPlugins = self.parse("container", pattern=True)
self.plugins["hoster"] = self.hosterPlugins = self.parse("hoster", pattern=True)
self.plugins["captcha"] = self.captchaPlugins = self.parse("captcha")
self.plugins["accounts"] = self.accountPlugins = self.parse("accounts")
if "accounts" in as_dict: #accounts needs to be reloaded
self.core.accountManager.initPlugins()
self.core.scheduler.addJob(0, self.core.accountManager.getAccountInfos)
return True
if __name__ == "__main__":
_ = lambda x: x
pypath = "/home/christian/Projekte/pyload-0.4/module/plugins"
from time import time
p = PluginManager(None)
a = time()
test = ["http://www.youtube.com/watch?v=%s" % x for x in range(0, 100)]
print p.parseUrls(test)
b = time()
print b - a, "s"
| gpl-3.0 |
Tianhao-Gu/kb_uploadmethods | lib/installed_clients/KBaseFeatureValuesServiceClient.py | 2 | 63225 | # -*- coding: utf-8 -*-
############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
############################################################
from __future__ import print_function
# the following is a hack to get the baseclient to import whether we're in a
# package or not. This makes pep8 unhappy hence the annotations.
try:
# baseclient and this client are in a package
from .baseclient import BaseClient as _BaseClient # @UnusedImport
except ImportError:
# no they aren't
from baseclient import BaseClient as _BaseClient # @Reimport
class KBaseFeatureValues(object):
def __init__(
self, url=None, timeout=30 * 60, user_id=None,
password=None, token=None, ignore_authrc=False,
trust_all_ssl_certificates=False,
auth_svc='https://ci.kbase.us/services/auth/api/legacy/KBase/Sessions/Login',
service_ver='release'):
if url is None:
url = 'https://kbase.us/services/service_wizard'
self._service_ver = service_ver
self._client = _BaseClient(
url, timeout=timeout, user_id=user_id, password=password,
token=token, ignore_authrc=ignore_authrc,
trust_all_ssl_certificates=trust_all_ssl_certificates,
auth_svc=auth_svc,
lookup_url=True)
def estimate_k(self, params, context=None):
"""
Used as an analysis step before generating clusters using K-means clustering, this method
provides an estimate of K by [...]
:param params: instance of type "EstimateKParams" -> structure:
parameter "input_matrix" of type "ws_matrix_id" (A workspace ID
that references a Float2DMatrix wrapper data object. @id ws
KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter "min_k"
of Long, parameter "max_k" of Long, parameter "max_iter" of Long,
parameter "random_seed" of Long, parameter "neighb_size" of Long,
parameter "max_items" of Long, parameter "out_workspace" of
String, parameter "out_estimate_result" of String
"""
return self._client.call_method('KBaseFeatureValues.estimate_k',
[params], self._service_ver, context)
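    # Hypothetical usage sketch (not part of the generated client): the workspace
    # names and the K range below are invented for illustration; the parameter
    # names follow the EstimateKParams structure documented above.
    #   client = KBaseFeatureValues(url=service_wizard_url, token=auth_token)
    #   result = client.estimate_k({
    #       'input_matrix': 'MyWorkspace/my_expression_matrix',
    #       'min_k': 2, 'max_k': 10,
    #       'out_workspace': 'MyWorkspace',
    #       'out_estimate_result': 'k_estimate_1',
    #   })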
def estimate_k_new(self, params, context=None):
"""
Used as an analysis step before generating clusters using K-means clustering, this method
provides an estimate of K by [...]
:param params: instance of type "EstimateKParamsNew" -> structure:
parameter "input_matrix" of type "ws_matrix_id" (A workspace ID
that references a Float2DMatrix wrapper data object. @id ws
KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter "min_k"
of Long, parameter "max_k" of Long, parameter "criterion" of
String, parameter "usepam" of type "boolean" (Indicates true or
false values, false = 0, true = 1 @range [0,1]), parameter "alpha"
of Double, parameter "diss" of type "boolean" (Indicates true or
false values, false = 0, true = 1 @range [0,1]), parameter
"random_seed" of Long, parameter "out_workspace" of String,
parameter "out_estimate_result" of String
"""
return self._client.call_method('KBaseFeatureValues.estimate_k_new',
[params], self._service_ver, context)
def cluster_k_means(self, params, context=None):
"""
Clusters features by K-means clustering.
:param params: instance of type "ClusterKMeansParams" -> structure:
parameter "k" of Long, parameter "input_data" of type
"ws_matrix_id" (A workspace ID that references a Float2DMatrix
wrapper data object. @id ws KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter
"n_start" of Long, parameter "max_iter" of Long, parameter
"random_seed" of Long, parameter "algorithm" of String, parameter
"out_workspace" of String, parameter "out_clusterset_id" of String
"""
return self._client.call_method('KBaseFeatureValues.cluster_k_means',
[params], self._service_ver, context)
def cluster_hierarchical(self, params, context=None):
"""
Clusters features by hierarchical clustering.
:param params: instance of type "ClusterHierarchicalParams" ->
structure: parameter "distance_metric" of String, parameter
"linkage_criteria" of String, parameter "feature_height_cutoff" of
Double, parameter "condition_height_cutoff" of Double, parameter
"max_items" of Long, parameter "input_data" of type "ws_matrix_id"
(A workspace ID that references a Float2DMatrix wrapper data
object. @id ws KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter
"algorithm" of String, parameter "out_workspace" of String,
parameter "out_clusterset_id" of String
"""
return self._client.call_method('KBaseFeatureValues.cluster_hierarchical',
[params], self._service_ver, context)
def clusters_from_dendrogram(self, params, context=None):
"""
        Given a FeatureClusters with a dendrogram built from a hierarchical clustering
        method, this function creates new clusters by cutting the dendrogram at
        a specific height or by some other approach.
:param params: instance of type "ClustersFromDendrogramParams" ->
structure: parameter "feature_height_cutoff" of Double, parameter
"condition_height_cutoff" of Double, parameter "input_data" of
type "ws_featureclusters_id" (The workspace ID of a
FeatureClusters data object. @id ws
KBaseFeatureValues.FeatureClusters), parameter "out_workspace" of
String, parameter "out_clusterset_id" of String
"""
return self._client.call_method('KBaseFeatureValues.clusters_from_dendrogram',
[params], self._service_ver, context)
def evaluate_clusterset_quality(self, params, context=None):
"""
        Given a FeatureClusters with a dendrogram built from a hierarchical clustering
        method, this function creates new clusters by cutting the dendrogram at
        a specific height or by some other approach.
:param params: instance of type "EvaluateClustersetQualityParams" ->
structure: parameter "input_clusterset" of type
"ws_featureclusters_id" (The workspace ID of a FeatureClusters
data object. @id ws KBaseFeatureValues.FeatureClusters), parameter
"out_workspace" of String, parameter "out_report_id" of String
"""
return self._client.call_method('KBaseFeatureValues.evaluate_clusterset_quality',
[params], self._service_ver, context)
def validate_matrix(self, params, context=None):
"""
:param params: instance of type "ValidateMatrixParams" (method -
optional field specifying special type of validation necessary for
particular clustering method.) -> structure: parameter "method" of
String, parameter "input_data" of type "ws_matrix_id" (A workspace
ID that references a Float2DMatrix wrapper data object. @id ws
KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix)
"""
return self._client.call_method('KBaseFeatureValues.validate_matrix',
[params], self._service_ver, context)
def correct_matrix(self, params, context=None):
"""
:param params: instance of type "CorrectMatrixParams" (transform_type
- type of matrix change (one of: add, multiply, normalize,
missing, ?). transform_value - optional field defining volume of
change if it's necessary for chosen transform_type.) -> structure:
parameter "transform_type" of String, parameter "transform_value"
of Double, parameter "input_data" of type "ws_matrix_id" (A
workspace ID that references a Float2DMatrix wrapper data object.
@id ws KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter
"out_workspace" of String, parameter "out_matrix_id" of String
"""
return self._client.call_method('KBaseFeatureValues.correct_matrix',
[params], self._service_ver, context)
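    # Hypothetical call sketch (object names invented): per the docstring above,
    # transform_type chooses the correction (add, multiply, normalize, missing, ...)
    # and transform_value supplies the magnitude when the chosen transform needs one.
    #   client.correct_matrix({
    #       'input_data': 'MyWorkspace/raw_matrix',
    #       'transform_type': 'add', 'transform_value': 1.0,
    #       'out_workspace': 'MyWorkspace', 'out_matrix_id': 'corrected_matrix',
    #   })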
def reconnect_matrix_to_genome(self, params, context=None):
"""
:param params: instance of type "ReconnectMatrixToGenomeParams"
(out_matrix_id - optional target matrix object name (if not
specified then target object overwrites input_data).) ->
structure: parameter "input_data" of type "ws_matrix_id" (A
workspace ID that references a Float2DMatrix wrapper data object.
@id ws KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter
"genome_ref" of type "ws_genome_id" (The workspace ID for a Genome
data object. @id ws KBaseGenomes.Genome), parameter
"out_workspace" of String, parameter "out_matrix_id" of String
"""
return self._client.call_method('KBaseFeatureValues.reconnect_matrix_to_genome',
[params], self._service_ver, context)
def build_feature_set(self, params, context=None):
"""
:param params: instance of type "BuildFeatureSetParams"
(base_feature_set - optional field, description - optional field.)
-> structure: parameter "genome" of type "ws_genome_id" (The
workspace ID for a Genome data object. @id ws
KBaseGenomes.Genome), parameter "feature_ids" of String, parameter
"feature_ids_custom" of String, parameter "base_feature_set" of
type "ws_featureset_id" (The workspace ID of a FeatureSet data
object. @id ws KBaseCollections.FeatureSet), parameter
"description" of String, parameter "out_workspace" of String,
parameter "output_feature_set" of String
"""
return self._client.call_method('KBaseFeatureValues.build_feature_set',
[params], self._service_ver, context)
def get_matrix_descriptor(self, GetMatrixDescriptorParams, context=None):
"""
:param GetMatrixDescriptorParams: instance of type
"GetMatrixDescriptorParams" (Parameters to retrieve
MatrixDescriptor) -> structure: parameter "input_data" of type
"ws_matrix_id" (A workspace ID that references a Float2DMatrix
wrapper data object. @id ws KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix)
:returns: instance of type "MatrixDescriptor" (General info about
matrix, including genome name that needs to be extracted from the
genome object) -> structure: parameter "matrix_id" of String,
parameter "matrix_name" of String, parameter "matrix_description"
of String, parameter "genome_id" of String, parameter
"genome_name" of String, parameter "rows_count" of Long, parameter
"columns_count" of Long, parameter "scale" of String, parameter
"type" of String, parameter "row_normalization" of String,
parameter "col_normalization" of String
"""
return self._client.call_method('KBaseFeatureValues.get_matrix_descriptor',
[GetMatrixDescriptorParams], self._service_ver, context)
def get_matrix_row_descriptors(self, GetMatrixItemDescriptorsParams, context=None):
"""
:param GetMatrixItemDescriptorsParams: instance of type
"GetMatrixItemDescriptorsParams" (Parameters to get basic
properties for items from the Float2D type of matrices. input_data
- worskapce reference to the ExpressionMatrix object (later we
should allow to work with other Float2DMatrix-like matrices, e.g.
fitness) item_indeces - indeces of items for whch descriptors
should be built. Either item_indeces or item_ids should be
provided. If both are provided, item_indeces will be used.
item_ids - ids of items for whch descriptors should be built.
Either item_indeces or item_ids should be provided. If both are
provided, item_indeces will be used. requested_property_types -
list of property types to be populated for each item. Currently
supported property types are: 'function') -> structure: parameter
"input_data" of type "ws_matrix_id" (A workspace ID that
references a Float2DMatrix wrapper data object. @id ws
KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter
"item_indeces" of list of Long, parameter "item_ids" of list of
String, parameter "requested_property_types" of list of String
:returns: instance of list of type "ItemDescriptor" (Basic
information about a particular item in a collection. index - index
of the item id - id of the item name - name of the item
description - description of the item properties - additinal
proerties: key - property type, value - value. For instance, if
item represents a feature, the property type can be a type of
feature annotation in a genome, e.g. 'function', 'strand', etc) ->
structure: parameter "index" of Long, parameter "id" of String,
parameter "name" of String, parameter "description" of String,
parameter "properties" of mapping from String to String
"""
return self._client.call_method('KBaseFeatureValues.get_matrix_row_descriptors',
[GetMatrixItemDescriptorsParams], self._service_ver, context)
def get_matrix_column_descriptors(self, GetMatrixItemDescriptorsParams, context=None):
"""
:param GetMatrixItemDescriptorsParams: instance of type
"GetMatrixItemDescriptorsParams" (Parameters to get basic
properties for items from the Float2D type of matrices. input_data
- worskapce reference to the ExpressionMatrix object (later we
should allow to work with other Float2DMatrix-like matrices, e.g.
fitness) item_indeces - indeces of items for whch descriptors
should be built. Either item_indeces or item_ids should be
provided. If both are provided, item_indeces will be used.
item_ids - ids of items for whch descriptors should be built.
Either item_indeces or item_ids should be provided. If both are
provided, item_indeces will be used. requested_property_types -
list of property types to be populated for each item. Currently
supported property types are: 'function') -> structure: parameter
"input_data" of type "ws_matrix_id" (A workspace ID that
references a Float2DMatrix wrapper data object. @id ws
KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter
"item_indeces" of list of Long, parameter "item_ids" of list of
String, parameter "requested_property_types" of list of String
:returns: instance of list of type "ItemDescriptor" (Basic
information about a particular item in a collection. index - index
of the item id - id of the item name - name of the item
description - description of the item properties - additinal
proerties: key - property type, value - value. For instance, if
item represents a feature, the property type can be a type of
feature annotation in a genome, e.g. 'function', 'strand', etc) ->
structure: parameter "index" of Long, parameter "id" of String,
parameter "name" of String, parameter "description" of String,
parameter "properties" of mapping from String to String
"""
return self._client.call_method('KBaseFeatureValues.get_matrix_column_descriptors',
[GetMatrixItemDescriptorsParams], self._service_ver, context)
def get_matrix_rows_stat(self, GetMatrixItemsStatParams, context=None):
"""
:param GetMatrixItemsStatParams: instance of type
"GetMatrixItemsStatParams" (Parameters to get statics for a set of
items from the Float2D type of matrices. input_data - worskapce
reference to the ExpressionMatrix object (later we should allow to
work with other Float2DMatrix-like matrices, e.g. fitness)
item_indeces_for - indeces of items FOR whch statistics should be
calculated item_indeces_on - indeces of items ON whch statistics
should be calculated fl_indeces_on - defines whether the
indeces_on should be populated in ItemStat objects. The default
value = 0.) -> structure: parameter "input_data" of type
"ws_matrix_id" (A workspace ID that references a Float2DMatrix
wrapper data object. @id ws KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter
"item_indeces_for" of list of Long, parameter "item_indeces_on" of
list of Long, parameter "fl_indeces_on" of type "boolean"
(Indicates true or false values, false = 0, true = 1 @range [0,1])
:returns: instance of list of type "ItemStat" (Statistics for a given
item in a collection (defined by index) , calculated on the
associated vector of values. Typical example is 2D matrix: item is
a given row, and correposnding values from all columns is an
associated vector. In relation to ExpressionMatrix we can think
about a gene (defined by row index in Float2DMatrix) and a vector
of expression values across all (or a subset of) conditions. In
this case, index_for - index of a row representing a gene in the
Float2DMatrix, indeces_on - indeces of columns represnting a set
of conditions on which we want to calculate statistics. index_for
- index of the item in a collection FOR which all statitics is
collected indeces_on - indeces of items in the associated vector
ON which the statistics is calculated size - number of elements in
the associated vector avg - mean value for a given item across all
elements in the associated vector min - min value for a given item
across all elements in the associated vector max - max value for a
given item across all elements in the associated vector std - std
value for a given item across all elements in the associated
vector missing_values - number of missing values for a given item
across all elements in the associated vector) -> structure:
parameter "index_for" of Long, parameter "indeces_on" of list of
Long, parameter "size" of Long, parameter "avg" of Double,
parameter "min" of Double, parameter "max" of Double, parameter
"std" of Double, parameter "missing_values" of Long
"""
return self._client.call_method('KBaseFeatureValues.get_matrix_rows_stat',
[GetMatrixItemsStatParams], self._service_ver, context)
def get_matrix_columns_stat(self, GetMatrixItemsStatParams, context=None):
"""
:param GetMatrixItemsStatParams: instance of type
"GetMatrixItemsStatParams" (Parameters to get statics for a set of
items from the Float2D type of matrices. input_data - worskapce
reference to the ExpressionMatrix object (later we should allow to
work with other Float2DMatrix-like matrices, e.g. fitness)
item_indeces_for - indeces of items FOR whch statistics should be
calculated item_indeces_on - indeces of items ON whch statistics
should be calculated fl_indeces_on - defines whether the
indeces_on should be populated in ItemStat objects. The default
value = 0.) -> structure: parameter "input_data" of type
"ws_matrix_id" (A workspace ID that references a Float2DMatrix
wrapper data object. @id ws KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter
"item_indeces_for" of list of Long, parameter "item_indeces_on" of
list of Long, parameter "fl_indeces_on" of type "boolean"
(Indicates true or false values, false = 0, true = 1 @range [0,1])
:returns: instance of list of type "ItemStat" (Statistics for a given
item in a collection (defined by index) , calculated on the
associated vector of values. Typical example is 2D matrix: item is
a given row, and correposnding values from all columns is an
associated vector. In relation to ExpressionMatrix we can think
about a gene (defined by row index in Float2DMatrix) and a vector
of expression values across all (or a subset of) conditions. In
this case, index_for - index of a row representing a gene in the
Float2DMatrix, indeces_on - indeces of columns represnting a set
of conditions on which we want to calculate statistics. index_for
- index of the item in a collection FOR which all statitics is
collected indeces_on - indeces of items in the associated vector
ON which the statistics is calculated size - number of elements in
the associated vector avg - mean value for a given item across all
elements in the associated vector min - min value for a given item
across all elements in the associated vector max - max value for a
given item across all elements in the associated vector std - std
value for a given item across all elements in the associated
vector missing_values - number of missing values for a given item
across all elements in the associated vector) -> structure:
parameter "index_for" of Long, parameter "indeces_on" of list of
Long, parameter "size" of Long, parameter "avg" of Double,
parameter "min" of Double, parameter "max" of Double, parameter
"std" of Double, parameter "missing_values" of Long
"""
return self._client.call_method('KBaseFeatureValues.get_matrix_columns_stat',
[GetMatrixItemsStatParams], self._service_ver, context)
def get_matrix_row_sets_stat(self, GetMatrixSetsStatParams, context=None):
"""
:param GetMatrixSetsStatParams: instance of type
"GetMatrixSetsStatParams" (Parameters to retrieve statistics for
set of sets of elements. In relation to ExpressionMatrix, these
parameters can be used to retrive sparklines for several gene
clusters generated on the same ExpressionMatrix in one call.
params - list of params to retrive statistics for a set of items
from the Float2D type of matrices.) -> structure: parameter
"params" of list of type "GetMatrixSetStatParams" (Parameters to
get statistics for a set of items from the Float2D type of
matrices in a form of ItemSetStat. This version is more flexible
and will be later used to retrieve set of sets of elements.
input_data - worskapce reference to the ExpressionMatrix object
(later we should allow to work with other Float2DMatrix-like
matrices, e.g. fitness) item_indeces_for - indeces of items FOR
wich statistics should be calculated item_indeces_on - indeces of
items ON wich statistics should be calculated fl_indeces_on -
defines whether the indeces_on should be populated in SetStat
objects. The default value = 0. fl_indeces_for - defines whether
the indeces_for should be populated in SetStat objects. The
default value = 0. fl_avgs - defines whether the avgs should be
populated in SetStat objects. The default value = 0. fl_mins -
defines whether the mins should be populated in SetStat objects.
The default value = 0. fl_maxs - defines whether the maxs should
be populated in SetStat objects. The default value = 0. fl_stds -
defines whether the stds should be populated in SetStat objects.
The default value = 0. fl_missing_values - defines whether the
missing_values should be populated in SetStat objects. The default
value = 0.) -> structure: parameter "input_data" of type
"ws_matrix_id" (A workspace ID that references a Float2DMatrix
wrapper data object. @id ws KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter
"item_indeces_for" of list of Long, parameter "item_indeces_on" of
list of Long, parameter "fl_indeces_on" of type "boolean"
(Indicates true or false values, false = 0, true = 1 @range
[0,1]), parameter "fl_indeces_for" of type "boolean" (Indicates
true or false values, false = 0, true = 1 @range [0,1]), parameter
"fl_avgs" of type "boolean" (Indicates true or false values, false
= 0, true = 1 @range [0,1]), parameter "fl_mins" of type "boolean"
(Indicates true or false values, false = 0, true = 1 @range
[0,1]), parameter "fl_maxs" of type "boolean" (Indicates true or
false values, false = 0, true = 1 @range [0,1]), parameter
"fl_stds" of type "boolean" (Indicates true or false values, false
= 0, true = 1 @range [0,1]), parameter "fl_missing_values" of type
"boolean" (Indicates true or false values, false = 0, true = 1
@range [0,1])
:returns: instance of list of type "ItemSetStat" (Same as ItemStat,
but for a set of Items. Actually it can be modeled as a
list<ItemStat>, but this way we can optimize data transfer in two
ways: 1. In parameters we can specify that we need a subset of
properties, e.g. only "avgs". 2. No field names in json (avg, min,
max, etc) for each element in the list indeces_for - indeces of
items in a collection FOR which all statitics is collected
indeces_on - indeces of items in the associated vector ON which
the statistics is calculated size - number of elements defined by
indeces_on (expected to be the same for all items defined by
indeces_for) avgs - mean values for each item defined by
indeces_for across all elements defined by indeces_on mins - min
values for each item defined by indeces_for across all elements
defined by indeces_on maxs - max values for each item defined by
indeces_for across all elements defined by indeces_on stds - std
values for each item defined by indeces_for across all elements
defined by indeces_on missing_values - number of missing values
for each item defined by indeces_for across all elements defined
by indeces_on) -> structure: parameter "indeces_for" of list of
Long, parameter "indeces_on" of list of Long, parameter "size" of
Long, parameter "avgs" of list of Double, parameter "mins" of list
of Double, parameter "maxs" of list of Double, parameter "stds" of
list of Double, parameter "missing_values" of list of Long
"""
return self._client.call_method('KBaseFeatureValues.get_matrix_row_sets_stat',
[GetMatrixSetsStatParams], self._service_ver, context)
def get_matrix_column_sets_stat(self, GetMatrixSetsStatParams, context=None):
"""
:param GetMatrixSetsStatParams: instance of type
"GetMatrixSetsStatParams" (Parameters to retrieve statistics for
set of sets of elements. In relation to ExpressionMatrix, these
parameters can be used to retrive sparklines for several gene
clusters generated on the same ExpressionMatrix in one call.
params - list of params to retrive statistics for a set of items
from the Float2D type of matrices.) -> structure: parameter
"params" of list of type "GetMatrixSetStatParams" (Parameters to
get statistics for a set of items from the Float2D type of
matrices in a form of ItemSetStat. This version is more flexible
and will be later used to retrieve set of sets of elements.
input_data - worskapce reference to the ExpressionMatrix object
(later we should allow to work with other Float2DMatrix-like
matrices, e.g. fitness) item_indeces_for - indeces of items FOR
wich statistics should be calculated item_indeces_on - indeces of
items ON wich statistics should be calculated fl_indeces_on -
defines whether the indeces_on should be populated in SetStat
objects. The default value = 0. fl_indeces_for - defines whether
the indeces_for should be populated in SetStat objects. The
default value = 0. fl_avgs - defines whether the avgs should be
populated in SetStat objects. The default value = 0. fl_mins -
defines whether the mins should be populated in SetStat objects.
The default value = 0. fl_maxs - defines whether the maxs should
be populated in SetStat objects. The default value = 0. fl_stds -
defines whether the stds should be populated in SetStat objects.
The default value = 0. fl_missing_values - defines whether the
missing_values should be populated in SetStat objects. The default
value = 0.) -> structure: parameter "input_data" of type
"ws_matrix_id" (A workspace ID that references a Float2DMatrix
wrapper data object. @id ws KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter
"item_indeces_for" of list of Long, parameter "item_indeces_on" of
list of Long, parameter "fl_indeces_on" of type "boolean"
(Indicates true or false values, false = 0, true = 1 @range
[0,1]), parameter "fl_indeces_for" of type "boolean" (Indicates
true or false values, false = 0, true = 1 @range [0,1]), parameter
"fl_avgs" of type "boolean" (Indicates true or false values, false
= 0, true = 1 @range [0,1]), parameter "fl_mins" of type "boolean"
(Indicates true or false values, false = 0, true = 1 @range
[0,1]), parameter "fl_maxs" of type "boolean" (Indicates true or
false values, false = 0, true = 1 @range [0,1]), parameter
"fl_stds" of type "boolean" (Indicates true or false values, false
= 0, true = 1 @range [0,1]), parameter "fl_missing_values" of type
"boolean" (Indicates true or false values, false = 0, true = 1
@range [0,1])
:returns: instance of list of type "ItemSetStat" (Same as ItemStat,
but for a set of Items. Actually it can be modeled as a
list<ItemStat>, but this way we can optimize data transfer in two
ways: 1. In parameters we can specify that we need a subset of
properties, e.g. only "avgs". 2. No field names in json (avg, min,
max, etc) for each element in the list indeces_for - indeces of
items in a collection FOR which all statitics is collected
indeces_on - indeces of items in the associated vector ON which
the statistics is calculated size - number of elements defined by
indeces_on (expected to be the same for all items defined by
indeces_for) avgs - mean values for each item defined by
indeces_for across all elements defined by indeces_on mins - min
values for each item defined by indeces_for across all elements
defined by indeces_on maxs - max values for each item defined by
indeces_for across all elements defined by indeces_on stds - std
values for each item defined by indeces_for across all elements
defined by indeces_on missing_values - number of missing values
for each item defined by indeces_for across all elements defined
by indeces_on) -> structure: parameter "indeces_for" of list of
Long, parameter "indeces_on" of list of Long, parameter "size" of
Long, parameter "avgs" of list of Double, parameter "mins" of list
of Double, parameter "maxs" of list of Double, parameter "stds" of
list of Double, parameter "missing_values" of list of Long
"""
return self._client.call_method('KBaseFeatureValues.get_matrix_column_sets_stat',
[GetMatrixSetsStatParams], self._service_ver, context)
def get_matrix_stat(self, GetMatrixStatParams, context=None):
"""
:param GetMatrixStatParams: instance of type "GetMatrixStatParams"
(Parameters to retrieve MatrixStat) -> structure: parameter
"input_data" of type "ws_matrix_id" (A workspace ID that
references a Float2DMatrix wrapper data object. @id ws
KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix)
:returns: instance of type "MatrixStat" (Data type for bulk queries.
It provides all necessary data to visulize basic properties of
ExpressionMatrix mtx_descriptor - decriptor of the matrix as a
whole row_descriptors - descriptor for each row in the matrix
(provides basic properties of the features) column_descriptors -
descriptor for each column in the matrix (provides basic
properties of the conditions) row_stats - basic statistics for
each row (feature) in the matrix, like mean, min, max, etc acorss
all columns (conditions) column_stats - basic statistics for each
row (feature) in the matrix, like mean, min, max, etc across all
rows (features)) -> structure: parameter "mtx_descriptor" of type
"MatrixDescriptor" (General info about matrix, including genome
name that needs to be extracted from the genome object) ->
structure: parameter "matrix_id" of String, parameter
"matrix_name" of String, parameter "matrix_description" of String,
parameter "genome_id" of String, parameter "genome_name" of
String, parameter "rows_count" of Long, parameter "columns_count"
of Long, parameter "scale" of String, parameter "type" of String,
parameter "row_normalization" of String, parameter
"col_normalization" of String, parameter "row_descriptors" of list
of type "ItemDescriptor" (Basic information about a particular
item in a collection. index - index of the item id - id of the
item name - name of the item description - description of the item
properties - additinal proerties: key - property type, value -
value. For instance, if item represents a feature, the property
type can be a type of feature annotation in a genome, e.g.
'function', 'strand', etc) -> structure: parameter "index" of
Long, parameter "id" of String, parameter "name" of String,
parameter "description" of String, parameter "properties" of
mapping from String to String, parameter "column_descriptors" of
list of type "ItemDescriptor" (Basic information about a
particular item in a collection. index - index of the item id - id
of the item name - name of the item description - description of
the item properties - additinal proerties: key - property type,
value - value. For instance, if item represents a feature, the
property type can be a type of feature annotation in a genome,
e.g. 'function', 'strand', etc) -> structure: parameter "index" of
Long, parameter "id" of String, parameter "name" of String,
parameter "description" of String, parameter "properties" of
mapping from String to String, parameter "row_stats" of list of
type "ItemStat" (Statistics for a given item in a collection
(defined by index) , calculated on the associated vector of
values. Typical example is 2D matrix: item is a given row, and
correposnding values from all columns is an associated vector. In
relation to ExpressionMatrix we can think about a gene (defined by
row index in Float2DMatrix) and a vector of expression values
across all (or a subset of) conditions. In this case, index_for -
index of a row representing a gene in the Float2DMatrix,
indeces_on - indeces of columns represnting a set of conditions on
which we want to calculate statistics. index_for - index of the
item in a collection FOR which all statitics is collected
indeces_on - indeces of items in the associated vector ON which
the statistics is calculated size - number of elements in the
associated vector avg - mean value for a given item across all
elements in the associated vector min - min value for a given item
across all elements in the associated vector max - max value for a
given item across all elements in the associated vector std - std
value for a given item across all elements in the associated
vector missing_values - number of missing values for a given item
across all elements in the associated vector) -> structure:
parameter "index_for" of Long, parameter "indeces_on" of list of
Long, parameter "size" of Long, parameter "avg" of Double,
parameter "min" of Double, parameter "max" of Double, parameter
"std" of Double, parameter "missing_values" of Long, parameter
"column_stats" of list of type "ItemStat" (Statistics for a given
item in a collection (defined by index) , calculated on the
associated vector of values. Typical example is 2D matrix: item is
a given row, and correposnding values from all columns is an
associated vector. In relation to ExpressionMatrix we can think
about a gene (defined by row index in Float2DMatrix) and a vector
of expression values across all (or a subset of) conditions. In
this case, index_for - index of a row representing a gene in the
Float2DMatrix, indeces_on - indeces of columns represnting a set
of conditions on which we want to calculate statistics. index_for
- index of the item in a collection FOR which all statitics is
collected indeces_on - indeces of items in the associated vector
ON which the statistics is calculated size - number of elements in
the associated vector avg - mean value for a given item across all
elements in the associated vector min - min value for a given item
across all elements in the associated vector max - max value for a
given item across all elements in the associated vector std - std
value for a given item across all elements in the associated
vector missing_values - number of missing values for a given item
across all elements in the associated vector) -> structure:
parameter "index_for" of Long, parameter "indeces_on" of list of
Long, parameter "size" of Long, parameter "avg" of Double,
parameter "min" of Double, parameter "max" of Double, parameter
"std" of Double, parameter "missing_values" of Long
"""
return self._client.call_method('KBaseFeatureValues.get_matrix_stat',
[GetMatrixStatParams], self._service_ver, context)
def get_submatrix_stat(self, GetSubmatrixStatParams, context=None):
"""
:param GetSubmatrixStatParams: instance of type
"GetSubmatrixStatParams" (Parameters to retrieve SubmatrixStat
input_data - reference to the source matrix row_indeces - indeces
defining a subset of matrix rows. Either row_indeces (highest
priorery) or row_ids should be provided. row_ids - ids defining a
subset of matrix rows. Either row_indeces (highest priorery) or
row_ids should be provided. column_indeces - indeces defining a
subset of matrix columns. Either column_indeces (highest priorery)
or column_ids should be provided. column_ids - ids defining a
subset of matrix columns. Either column_indeces (highest priorery)
or column_ids should be provided. fl_row_set_stats - defines
whether row_set_stats should be calculated in include in the
SubmatrixStat. Default value = 0 fl_column_set_stat - defines
whether column_set_stat should be calculated in include in the
SubmatrixStat. Default value = 0 fl_mtx_row_set_stat - defines
whether mtx_row_set_stat should be calculated in include in the
SubmatrixStat. Default value = 0 fl_mtx_column_set_stat - defines
whether mtx_column_set_stat should be calculated in include in the
SubmatrixStat. Default value = 0 fl_row_pairwise_correlation -
defines whether row_pairwise_correlation should be calculated in
include in the SubmatrixStat. Default value = 0
fl_column_pairwise_correlation - defines whether
column_pairwise_correlation should be calculated in include in the
SubmatrixStat. Default value = 0 fl_values - defines whether
values should be calculated in include in the SubmatrixStat.
Default value = 0) -> structure: parameter "input_data" of type
"ws_matrix_id" (A workspace ID that references a Float2DMatrix
wrapper data object. @id ws KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter
"row_indeces" of list of Long, parameter "row_ids" of list of
String, parameter "column_indeces" of list of Long, parameter
"column_ids" of list of String, parameter "fl_row_set_stats" of
type "boolean" (Indicates true or false values, false = 0, true =
1 @range [0,1]), parameter "fl_column_set_stat" of type "boolean"
(Indicates true or false values, false = 0, true = 1 @range
[0,1]), parameter "fl_mtx_row_set_stat" of type "boolean"
(Indicates true or false values, false = 0, true = 1 @range
[0,1]), parameter "fl_mtx_column_set_stat" of type "boolean"
(Indicates true or false values, false = 0, true = 1 @range
[0,1]), parameter "fl_row_pairwise_correlation" of type "boolean"
(Indicates true or false values, false = 0, true = 1 @range
[0,1]), parameter "fl_column_pairwise_correlation" of type
"boolean" (Indicates true or false values, false = 0, true = 1
@range [0,1]), parameter "fl_values" of type "boolean" (Indicates
true or false values, false = 0, true = 1 @range [0,1])
:returns: instance of type "SubmatrixStat" (Data type for bulk
queries. It provides various statistics calculated on sub-matrix.
The sub-matrix is defined by a subset of rows and columns via
parameters. Parameters will also define the required types of
statics. mtx_descriptor - basic properties of the source matrix
row_descriptors - descriptor for each row in a subset defined in
the parameters column_descriptors - descriptor for each column in
a subset defined in the parameters row_set_stats - basic
statistics for a subset of rows calculated on a subset of columns
column_set_stat - basic statistics for a subset of columns
calculated on a subset of rows mtx_row_set_stat - basic statistics
for a subset of rows calculated on ALL columns in the matrix (can
be used as a backgound in comparison with row_set_stats)
mtx_column_set_stat - basic statistics for a subset of columns
calculated on ALL rows in the matrix (can be used as a backgound
in comparison with column_set_stat) row_pairwise_correlation -
pariwise perason correlation for a subset of rows (features)
column_pairwise_correlation - pariwise perason correlation for a
subset of columns (conditions) values - sub-matrix representing
actual values for a given subset of rows and a subset of columns)
-> structure: parameter "mtx_descriptor" of type
"MatrixDescriptor" (General info about matrix, including genome
name that needs to be extracted from the genome object) ->
structure: parameter "matrix_id" of String, parameter
"matrix_name" of String, parameter "matrix_description" of String,
parameter "genome_id" of String, parameter "genome_name" of
String, parameter "rows_count" of Long, parameter "columns_count"
of Long, parameter "scale" of String, parameter "type" of String,
parameter "row_normalization" of String, parameter
"col_normalization" of String, parameter "row_descriptors" of list
of type "ItemDescriptor" (Basic information about a particular
item in a collection. index - index of the item id - id of the
item name - name of the item description - description of the item
properties - additinal proerties: key - property type, value -
value. For instance, if item represents a feature, the property
type can be a type of feature annotation in a genome, e.g.
'function', 'strand', etc) -> structure: parameter "index" of
Long, parameter "id" of String, parameter "name" of String,
parameter "description" of String, parameter "properties" of
mapping from String to String, parameter "column_descriptors" of
list of type "ItemDescriptor" (Basic information about a
particular item in a collection. index - index of the item id - id
of the item name - name of the item description - description of
the item properties - additinal proerties: key - property type,
value - value. For instance, if item represents a feature, the
property type can be a type of feature annotation in a genome,
e.g. 'function', 'strand', etc) -> structure: parameter "index" of
Long, parameter "id" of String, parameter "name" of String,
parameter "description" of String, parameter "properties" of
mapping from String to String, parameter "row_set_stats" of type
"ItemSetStat" (Same as ItemStat, but for a set of Items. Actually
it can be modeled as a list<ItemStat>, but this way we can
optimize data transfer in two ways: 1. In parameters we can
specify that we need a subset of properties, e.g. only "avgs". 2.
No field names in json (avg, min, max, etc) for each element in
the list indeces_for - indeces of items in a collection FOR which
all statitics is collected indeces_on - indeces of items in the
associated vector ON which the statistics is calculated size -
number of elements defined by indeces_on (expected to be the same
for all items defined by indeces_for) avgs - mean values for each
item defined by indeces_for across all elements defined by
indeces_on mins - min values for each item defined by indeces_for
across all elements defined by indeces_on maxs - max values for
each item defined by indeces_for across all elements defined by
indeces_on stds - std values for each item defined by indeces_for
across all elements defined by indeces_on missing_values - number
of missing values for each item defined by indeces_for across all
elements defined by indeces_on) -> structure: parameter
"indeces_for" of list of Long, parameter "indeces_on" of list of
Long, parameter "size" of Long, parameter "avgs" of list of
Double, parameter "mins" of list of Double, parameter "maxs" of
list of Double, parameter "stds" of list of Double, parameter
"missing_values" of list of Long, parameter "column_set_stat" of
type "ItemSetStat" (Same as ItemStat, but for a set of Items.
Actually it can be modeled as a list<ItemStat>, but this way we
can optimize data transfer in two ways: 1. In parameters we can
specify that we need a subset of properties, e.g. only "avgs". 2.
No field names in json (avg, min, max, etc) for each element in
the list indeces_for - indeces of items in a collection FOR which
           all statistics are collected indeces_on - indeces of items in the
associated vector ON which the statistics is calculated size -
number of elements defined by indeces_on (expected to be the same
for all items defined by indeces_for) avgs - mean values for each
item defined by indeces_for across all elements defined by
indeces_on mins - min values for each item defined by indeces_for
across all elements defined by indeces_on maxs - max values for
each item defined by indeces_for across all elements defined by
indeces_on stds - std values for each item defined by indeces_for
across all elements defined by indeces_on missing_values - number
of missing values for each item defined by indeces_for across all
elements defined by indeces_on) -> structure: parameter
"indeces_for" of list of Long, parameter "indeces_on" of list of
Long, parameter "size" of Long, parameter "avgs" of list of
Double, parameter "mins" of list of Double, parameter "maxs" of
list of Double, parameter "stds" of list of Double, parameter
"missing_values" of list of Long, parameter "mtx_row_set_stat" of
type "ItemSetStat" (Same as ItemStat, but for a set of Items.
Actually it can be modeled as a list<ItemStat>, but this way we
can optimize data transfer in two ways: 1. In parameters we can
specify that we need a subset of properties, e.g. only "avgs". 2.
No field names in json (avg, min, max, etc) for each element in
the list indeces_for - indeces of items in a collection FOR which
           all statistics are collected indeces_on - indeces of items in the
associated vector ON which the statistics is calculated size -
number of elements defined by indeces_on (expected to be the same
for all items defined by indeces_for) avgs - mean values for each
item defined by indeces_for across all elements defined by
indeces_on mins - min values for each item defined by indeces_for
across all elements defined by indeces_on maxs - max values for
each item defined by indeces_for across all elements defined by
indeces_on stds - std values for each item defined by indeces_for
across all elements defined by indeces_on missing_values - number
of missing values for each item defined by indeces_for across all
elements defined by indeces_on) -> structure: parameter
"indeces_for" of list of Long, parameter "indeces_on" of list of
Long, parameter "size" of Long, parameter "avgs" of list of
Double, parameter "mins" of list of Double, parameter "maxs" of
list of Double, parameter "stds" of list of Double, parameter
"missing_values" of list of Long, parameter "mtx_column_set_stat"
of type "ItemSetStat" (Same as ItemStat, but for a set of Items.
Actually it can be modeled as a list<ItemStat>, but this way we
can optimize data transfer in two ways: 1. In parameters we can
specify that we need a subset of properties, e.g. only "avgs". 2.
No field names in json (avg, min, max, etc) for each element in
the list indeces_for - indeces of items in a collection FOR which
           all statistics are collected indeces_on - indeces of items in the
associated vector ON which the statistics is calculated size -
number of elements defined by indeces_on (expected to be the same
for all items defined by indeces_for) avgs - mean values for each
item defined by indeces_for across all elements defined by
indeces_on mins - min values for each item defined by indeces_for
across all elements defined by indeces_on maxs - max values for
each item defined by indeces_for across all elements defined by
indeces_on stds - std values for each item defined by indeces_for
across all elements defined by indeces_on missing_values - number
of missing values for each item defined by indeces_for across all
elements defined by indeces_on) -> structure: parameter
"indeces_for" of list of Long, parameter "indeces_on" of list of
Long, parameter "size" of Long, parameter "avgs" of list of
Double, parameter "mins" of list of Double, parameter "maxs" of
list of Double, parameter "stds" of list of Double, parameter
"missing_values" of list of Long, parameter
"row_pairwise_correlation" of type "PairwiseComparison" (To
represent a pairwise comparison of several elements defined by
           'indeces'. This data type can be used to represent pairwise
correlation of expression profiles for a set of genes. indeces -
indeces of elements to be compared comparison_values - values
           representing a particular type of comparison between elements.
Expected to be symmetric: comparison_values[i][j] =
comparison_values[j][i]. Diagonal values: comparison_values[i][i]
= 0 avgs - mean of comparison_values for each element mins - min
of comparison_values for each element maxs - max of
comparison_values for each element stds - std of comparison_values
for each element) -> structure: parameter "indeces" of list of
Long, parameter "comparison_values" of list of list of Double,
parameter "avgs" of list of Double, parameter "mins" of list of
Double, parameter "maxs" of list of Double, parameter "stds" of
list of Double, parameter "column_pairwise_correlation" of type
"PairwiseComparison" (To represent a pairwise comparison of
several elements defined by 'indeces'. This data type can be used
           to represent pairwise correlation of expression profiles for
a set of genes. indeces - indeces of elements to be compared
           comparison_values - values representing a particular type of
comparison between elements. Expected to be symmetric:
comparison_values[i][j] = comparison_values[j][i]. Diagonal
values: comparison_values[i][i] = 0 avgs - mean of
comparison_values for each element mins - min of comparison_values
for each element maxs - max of comparison_values for each element
stds - std of comparison_values for each element) -> structure:
parameter "indeces" of list of Long, parameter "comparison_values"
of list of list of Double, parameter "avgs" of list of Double,
parameter "mins" of list of Double, parameter "maxs" of list of
Double, parameter "stds" of list of Double, parameter "values" of
list of list of Double
"""
return self._client.call_method('KBaseFeatureValues.get_submatrix_stat',
[GetSubmatrixStatParams], self._service_ver, context)
def tsv_file_to_matrix(self, params, context=None):
"""
:param params: instance of type "TsvFileToMatrixParams"
           (input_shock_id and input_file_path - alternative input params,
genome_ref - optional reference to a Genome object that will be
used for mapping feature IDs to, fill_missing_values - optional
flag for filling in missing values in matrix (default value is
           false), data_type - optional field, value is one of
'untransformed', 'log2_level', 'log10_level', 'log2_ratio',
'log10_ratio' or 'unknown' (last one is default value), data_scale
- optional parameter (default value is '1.0').) -> structure:
parameter "input_shock_id" of String, parameter "input_file_path"
of String, parameter "genome_ref" of type "ws_genome_id" (The
workspace ID for a Genome data object. @id ws
KBaseGenomes.Genome), parameter "fill_missing_values" of type
"boolean" (Indicates true or false values, false = 0, true = 1
@range [0,1]), parameter "data_type" of String, parameter
"data_scale" of String, parameter "output_ws_name" of String,
parameter "output_obj_name" of String
:returns: instance of type "TsvFileToMatrixOutput" -> structure:
parameter "output_matrix_ref" of type "ws_matrix_id" (A workspace
ID that references a Float2DMatrix wrapper data object. @id ws
KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix)
"""
return self._client.call_method('KBaseFeatureValues.tsv_file_to_matrix',
[params], self._service_ver, context)
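    # Usage sketch (added for illustration; not part of the generated client).
    # Assuming a client instance pointed at a running KBaseFeatureValues
    # service, a call might look roughly like the commented example below; the
    # file path, workspace name and genome reference are placeholders rather
    # than values defined in this module.
    #
    #   result = client.tsv_file_to_matrix({
    #       'input_file_path': '/tmp/expression.tsv',  # or 'input_shock_id'
    #       'genome_ref': 'MyWorkspace/MyGenome',       # optional
    #       'fill_missing_values': 1,                   # boolean: 0 or 1
    #       'data_type': 'log2_level',
    #       'data_scale': '1.0',
    #       'output_ws_name': 'MyWorkspace',
    #       'output_obj_name': 'my_expression_matrix',
    #   })
    #   # result['output_matrix_ref'] references the created matrix object.
    #
    # The other wrappers in this class follow the same pattern: pass a single
    # params dict shaped as described in their docstrings.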
def matrix_to_tsv_file(self, params, context=None):
"""
:param params: instance of type "MatrixToTsvFileParams" -> structure:
parameter "input_ref" of type "ws_matrix_id" (A workspace ID that
references a Float2DMatrix wrapper data object. @id ws
KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix), parameter
"to_shock" of type "boolean" (Indicates true or false values,
false = 0, true = 1 @range [0,1]), parameter "file_path" of String
:returns: instance of type "MatrixToTsvFileOutput" -> structure:
parameter "file_path" of String, parameter "shock_id" of String
"""
return self._client.call_method('KBaseFeatureValues.matrix_to_tsv_file',
[params], self._service_ver, context)
def export_matrix(self, params, context=None):
"""
:param params: instance of type "ExportMatrixParams" -> structure:
parameter "input_ref" of type "ws_matrix_id" (A workspace ID that
references a Float2DMatrix wrapper data object. @id ws
KBaseFeatureValues.ExpressionMatrix
KBaseFeatureValues.SingleKnockoutFitnessMatrix)
:returns: instance of type "ExportMatrixOutput" -> structure:
parameter "shock_id" of String
"""
return self._client.call_method('KBaseFeatureValues.export_matrix',
[params], self._service_ver, context)
def clusters_to_file(self, params, context=None):
"""
:param params: instance of type "ClustersToFileParams" (format -
optional field, can be one of "TSV" or "SIF" ("TSV" is default
value).) -> structure: parameter "input_ref" of type
"ws_featureclusters_id" (The workspace ID of a FeatureClusters
data object. @id ws KBaseFeatureValues.FeatureClusters), parameter
"to_shock" of type "boolean" (Indicates true or false values,
false = 0, true = 1 @range [0,1]), parameter "file_path" of
String, parameter "format" of String
:returns: instance of type "ClustersToFileOutput" -> structure:
parameter "file_path" of String, parameter "shock_id" of String
"""
return self._client.call_method('KBaseFeatureValues.clusters_to_file',
[params], self._service_ver, context)
def export_clusters_tsv(self, params, context=None):
"""
:param params: instance of type "ExportClustersTsvParams" ->
structure: parameter "input_ref" of type "ws_featureclusters_id"
(The workspace ID of a FeatureClusters data object. @id ws
KBaseFeatureValues.FeatureClusters)
:returns: instance of type "ExportClustersTsvOutput" -> structure:
parameter "shock_id" of String
"""
return self._client.call_method('KBaseFeatureValues.export_clusters_tsv',
[params], self._service_ver, context)
def export_clusters_sif(self, params, context=None):
"""
:param params: instance of type "ExportClustersSifParams" ->
structure: parameter "input_ref" of type "ws_featureclusters_id"
(The workspace ID of a FeatureClusters data object. @id ws
KBaseFeatureValues.FeatureClusters)
:returns: instance of type "ExportClustersSifOutput" -> structure:
parameter "shock_id" of String
"""
return self._client.call_method('KBaseFeatureValues.export_clusters_sif',
[params], self._service_ver, context)
def status(self, context=None):
return self._client.call_method('KBaseFeatureValues.status',
[], self._service_ver, context)
| mit |
stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/GL/feedback.py | 9 | 3420 | """Utility module to parse a Feedback buffer"""
from OpenGL import contextdata
from OpenGL.GL.VERSION import GL_1_1 as _simple
def parseFeedback( buffer, entryCount ):
"""Parse the feedback buffer into Python object records"""
bufferIndex = 0
result = []
getVertex = createGetVertex( )
while bufferIndex < entryCount:
token = int(buffer[bufferIndex])
bufferIndex += 1
if token in SINGLE_VERTEX_TOKENS:
vData, bufferIndex = getVertex( buffer, bufferIndex )
result.append( (SINGLE_VERTEX_TOKENS.get(token), Vertex(*vData)) )
elif token in DOUBLE_VERTEX_TOKENS:
vData, bufferIndex = getVertex( buffer, bufferIndex )
vData2, bufferIndex = getVertex( buffer, bufferIndex )
result.append( (
DOUBLE_VERTEX_TOKENS.get(token),
Vertex(*vData),
Vertex(*vData2),
) )
elif token == _simple.GL_PASS_THROUGH_TOKEN:
result.append( (_simple.GL_PASS_THROUGH_TOKEN, buffer[bufferIndex]))
bufferIndex += 1
elif token == _simple.GL_POLYGON_TOKEN:
temp = [_simple.GL_POLYGON_TOKEN]
count = int(buffer[bufferIndex])
bufferIndex += 1
for item in range(count):
vData,bufferIndex = getVertex( buffer, bufferIndex )
temp.append( Vertex(*vData))
result.append( tuple(temp))
else:
raise ValueError(
"""Unrecognised token %r in feedback stream"""%(token,)
)
return result
SINGLE_VERTEX_TOKENS = {
_simple.GL_BITMAP_TOKEN: _simple.GL_BITMAP_TOKEN,
_simple.GL_COPY_PIXEL_TOKEN: _simple.GL_COPY_PIXEL_TOKEN,
_simple.GL_DRAW_PIXEL_TOKEN: _simple.GL_DRAW_PIXEL_TOKEN,
_simple.GL_POINT_TOKEN: _simple.GL_POINT_TOKEN,
}
DOUBLE_VERTEX_TOKENS = {
_simple.GL_LINE_TOKEN: _simple.GL_LINE_TOKEN,
_simple.GL_LINE_RESET_TOKEN: _simple.GL_LINE_RESET_TOKEN,
}
class Vertex( object ):
"""Simplistic holder for vertex data from a feedback buffer"""
__slots__ = ('vertex','color','texture')
def __init__( self, vertex,color=None,texture=None):
"""Store values for access"""
self.vertex = vertex
self.color = color
self.texture = texture
def createGetVertex( ):
mode = contextdata.getValue( "GL_FEEDBACK_BUFFER_TYPE" )
indexMode = _simple.glGetBooleanv( _simple.GL_INDEX_MODE )
colorSize = [ 4,1 ][ int(indexMode) ]
if mode in (_simple.GL_2D,_simple.GL_3D):
if mode == _simple.GL_2D:
size = 2
else:
size = 3
def getVertex( buffer, bufferIndex ):
end = bufferIndex+size
return (buffer[bufferIndex:end],None,None),end
elif mode == _simple.GL_3D_COLOR:
def getVertex( buffer, bufferIndex ):
end = bufferIndex+3
colorEnd = end + colorSize
return (buffer[bufferIndex:end],buffer[end:colorEnd],None),colorEnd
else:
if mode == _simple.GL_3D_COLOR_TEXTURE:
size = 3
else:
size = 4
def getVertex( buffer, bufferIndex ):
end = bufferIndex+size
colorEnd = end + colorSize
textureEnd = colorEnd + 4
return (buffer[bufferIndex:end],buffer[end:colorEnd],buffer[colorEnd:textureEnd]),textureEnd
return getVertex
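# Hedged usage sketch (added for illustration; not part of the original
# module). It assumes a current GL context exists and that the PyOpenGL
# glFeedbackBuffer wrapper allocates and returns the buffer when one is not
# supplied; the drawing calls themselves are left as a placeholder.
def exampleFeedbackUsage( maxValues=4096 ):
    """Collect feedback for some drawing calls and parse it"""
    buffer = _simple.glFeedbackBuffer( maxValues, _simple.GL_3D_COLOR )
    _simple.glRenderMode( _simple.GL_FEEDBACK )
    # ... issue the drawing calls whose primitives should be captured ...
    written = _simple.glRenderMode( _simple.GL_RENDER )
    return parseFeedback( buffer, written )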
| lgpl-3.0 |
dhalleine/tensorflow | tensorflow/python/kernel_tests/template_test.py | 1 | 9074 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for make_template."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import traceback
import tensorflow as tf
from tensorflow.python.ops import template
def var_scoped_function():
return tf.get_variable("dummy",
shape=[1],
initializer=tf.zeros_initializer)
def internally_var_scoped_function(scope_name):
with tf.variable_scope(scope_name):
return tf.get_variable("dummy",
shape=[1],
initializer=tf.zeros_initializer)
def function_with_create(trainable):
"""Creates a variable as a side effect using tf.Variable."""
tf.Variable(0, trainable=trainable)
return tf.get_variable("dummy",
shape=[1],
initializer=tf.zeros_initializer)
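# Illustrative note (added; not part of the original tests): make_template
# wraps a variable-creating function such as var_scoped_function so that every
# call after the first reuses the variables created on the first call, e.g.
# (a rough sketch):
#
#   scale = template.make_template("scale", var_scoped_function)
#   v1 = scale()  # creates "scale/dummy"
#   v2 = scale()  # reuses it, so v1 is v2
#
# The TemplateTest cases below assert exactly this sharing behaviour.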
class TemplateTest(tf.test.TestCase):
def test_end_to_end(self):
"""This test shows a very simple line model with test_loss.
The template is used to share parameters between a training and test model.
"""
# y = 2x + 1
training_input, training_output = ([1., 2., 3., 4.], [2.8, 5.1, 7.2, 8.7])
test_input, test_output = ([5., 6., 7., 8.], [11, 13, 15, 17])
tf.set_random_seed(1234)
def test_line(x):
m = tf.get_variable("w", shape=[],
initializer=tf.truncated_normal_initializer())
b = tf.get_variable("b", shape=[],
initializer=tf.truncated_normal_initializer())
return x * m + b
line_template = template.make_template("line", test_line)
train_prediction = line_template(training_input)
test_prediction = line_template(test_input)
train_loss = tf.reduce_mean(tf.square(train_prediction - training_output))
test_loss = tf.reduce_mean(tf.square(test_prediction - test_output))
optimizer = tf.train.GradientDescentOptimizer(0.1)
train_op = optimizer.minimize(train_loss)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
initial_test_loss = sess.run(test_loss)
sess.run(train_op)
final_test_loss = sess.run(test_loss)
# Parameters are tied, so the loss should have gone down when we trained it.
self.assertLess(final_test_loss, initial_test_loss)
def test_skip_stack_frames(self):
first = traceback.format_stack()
second = traceback.format_stack()
result = template._skip_common_stack_elements(first, second)
self.assertEqual(1, len(result))
self.assertNotEqual(len(first), len(result))
def test_template_with_name(self):
tmpl1 = template.make_template("s1", var_scoped_function)
tmpl2 = template.make_template("s1", var_scoped_function)
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
self.assertEqual("s1/dummy:0", v1.name)
self.assertEqual("s1_1/dummy:0", v3.name)
def test_template_in_scope(self):
tmpl1 = template.make_template("s1", var_scoped_function)
tmpl2 = template.make_template("s1", var_scoped_function)
with tf.variable_scope("scope"):
v1 = tmpl1()
v3 = tmpl2()
# The template contract requires the following to ignore scope2.
with tf.variable_scope("scope2"):
v2 = tmpl1()
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
self.assertEqual("scope/s1/dummy:0", v1.name)
self.assertEqual("scope/s1_1/dummy:0", v3.name)
def test_template_with_internal_reuse(self):
tmpl1 = template.make_template("s1", internally_var_scoped_function)
tmpl2 = template.make_template("s1", internally_var_scoped_function)
v1 = tmpl1("test")
v2 = tmpl1("test")
v3 = tmpl2("test")
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
self.assertEqual("s1/test/dummy:0", v1.name)
self.assertEqual("s1_1/test/dummy:0", v3.name)
with self.assertRaises(ValueError):
tmpl1("not_test")
def test_template_without_name(self):
with self.assertRaises(ValueError):
template.make_template(None, var_scoped_function)
def test_make_template(self):
# Test both that we can call it with positional and keywords.
tmpl1 = template.make_template(
"s1", internally_var_scoped_function, scope_name="test")
tmpl2 = template.make_template(
"s1", internally_var_scoped_function, scope_name="test")
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
self.assertEqual("s1/test/dummy:0", v1.name)
self.assertEqual("s1_1/test/dummy:0", v3.name)
def test_enforces_no_extra_trainable_variables(self):
tmpl = template.make_template("s", function_with_create, trainable=True)
tmpl()
with self.assertRaises(ValueError):
tmpl()
def test_permits_extra_non_trainable_variables(self):
tmpl = template.make_template("s", function_with_create, trainable=False)
self.assertEqual(tmpl(), tmpl())
def test_internal_variable_reuse(self):
def nested():
with tf.variable_scope("nested") as vs:
v1 = tf.get_variable("x", initializer=tf.zeros_initializer, shape=[])
with tf.variable_scope(vs, reuse=True):
v2 = tf.get_variable("x")
self.assertEqual(v1, v2)
return v1
tmpl1 = template.make_template("s1", nested)
tmpl2 = template.make_template("s1", nested)
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
self.assertEqual("s1/nested/x:0", v1.name)
self.assertEqual("s1_1/nested/x:0", v3.name)
def test_nested_templates(self):
def nested_template():
nested1 = template.make_template("nested", var_scoped_function)
nested2 = template.make_template("nested", var_scoped_function)
v1 = nested1()
v2 = nested2()
self.assertNotEqual(v1, v2)
return v2
tmpl1 = template.make_template("s1", nested_template)
tmpl2 = template.make_template("s1", nested_template)
v1 = tmpl1()
v2 = tmpl1()
v3 = tmpl2()
self.assertEqual(v1, v2)
self.assertNotEqual(v1, v3)
self.assertEqual("s1/nested_1/dummy:0", v1.name)
self.assertEqual("s1_1/nested_1/dummy:0", v3.name)
def test_immediate_scope_creation(self):
# Create templates in scope a then call in scope b. make_template should
# capture the scope the first time it is called, and make_immediate_template
# should capture the scope at construction time.
with tf.variable_scope("ctor_scope"):
tmpl_immed = template.make_template(
"a", var_scoped_function, True) # create scope here
tmpl_defer = template.make_template(
"b", var_scoped_function, False) # default: create scope at __call__
with tf.variable_scope("call_scope"):
inner_imm_var = tmpl_immed()
inner_defer_var = tmpl_defer()
outer_imm_var = tmpl_immed()
outer_defer_var = tmpl_defer()
self.assertNotEqual(inner_imm_var, inner_defer_var)
self.assertEqual(outer_imm_var, inner_imm_var)
self.assertEqual(outer_defer_var, inner_defer_var)
self.assertEqual("ctor_scope/a/dummy:0", inner_imm_var.name)
self.assertEqual("call_scope/b/dummy:0", inner_defer_var.name)
def test_scope_access(self):
# Ensure that we can access the scope inside the template, because the name
# of that scope may be different from the name we pass to make_template, due
# to having been made unique by variable_op_scope.
with tf.variable_scope("foo"):
# Create two templates with the same name, ensure scopes are made unique.
ta = template.make_template("bar", var_scoped_function, True)
tb = template.make_template("bar", var_scoped_function, True)
# Ensure we can get the scopes before either template is actually called.
self.assertEqual(ta.var_scope.name, "foo/bar")
self.assertEqual(tb.var_scope.name, "foo/bar_1")
with tf.variable_scope("foo_2"):
# Create a template which defers scope creation.
tc = template.make_template("blah", var_scoped_function, False)
# Before we call the template, the scope property will be set to None.
self.assertEqual(tc.var_scope, None)
tc()
# Template is called at the top level, so there is no preceding "foo_2".
self.assertEqual(tc.var_scope.name, "blah")
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
helldorado/ansible | lib/ansible/modules/cloud/webfaction/webfaction_site.py | 44 | 6530 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Quentin Stafford-Fraser
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Create Webfaction website using Ansible and the Webfaction API
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: webfaction_site
short_description: Add or remove a website on a Webfaction host
description:
- Add or remove a website on a Webfaction host. Further documentation at https://github.com/quentinsf/ansible-webfaction.
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
address. You can use a DNS name.
- If a site of the same name exists in the account but on a different host, the operation will exit.
- >
You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API <https://docs.webfaction.com/xmlrpc-api/>`_ for more info.
options:
name:
description:
- The name of the website
required: true
state:
description:
- Whether the website should exist
choices: ['present', 'absent']
default: "present"
host:
description:
- The webfaction host on which the site should be created.
required: true
https:
description:
- Whether or not to use HTTPS
type: bool
default: 'no'
site_apps:
description:
- A mapping of URLs to apps
default: []
subdomains:
description:
- A list of subdomains associated with this site.
default: []
login_name:
description:
- The webfaction account to use
required: true
login_password:
description:
- The webfaction password to use
required: true
'''
EXAMPLES = '''
- name: create website
webfaction_site:
name: testsite1
state: present
host: myhost.webfaction.com
subdomains:
- 'testsite1.my_domain.org'
site_apps:
- ['testapp1', '/']
https: no
login_name: "{{webfaction_user}}"
login_password: "{{webfaction_passwd}}"
'''
import socket
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves import xmlrpc_client
webfaction = xmlrpc_client.ServerProxy('https://api.webfaction.com/')
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(required=False, choices=['present', 'absent'], default='present'),
# You can specify an IP address or hostname.
host=dict(required=True),
https=dict(required=False, type='bool', default=False),
subdomains=dict(required=False, type='list', default=[]),
site_apps=dict(required=False, type='list', default=[]),
login_name=dict(required=True),
login_password=dict(required=True, no_log=True),
),
supports_check_mode=True
)
site_name = module.params['name']
site_state = module.params['state']
site_host = module.params['host']
site_ip = socket.gethostbyname(site_host)
session_id, account = webfaction.login(
module.params['login_name'],
module.params['login_password']
)
site_list = webfaction.list_websites(session_id)
site_map = dict([(i['name'], i) for i in site_list])
existing_site = site_map.get(site_name)
result = {}
# Here's where the real stuff happens
if site_state == 'present':
# Does a site with this name already exist?
if existing_site:
# If yes, but it's on a different IP address, then fail.
# If we wanted to allow relocation, we could add a 'relocate=true' option
# which would get the existing IP address, delete the site there, and create it
# at the new address. A bit dangerous, perhaps, so for now we'll require manual
# deletion if it's on another host.
if existing_site['ip'] != site_ip:
module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")
# If it's on this host and the key parameters are the same, nothing needs to be done.
if (existing_site['https'] == module.boolean(module.params['https'])) and \
(set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
(dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
module.exit_json(
changed=False
)
positional_args = [
session_id, site_name, site_ip,
module.boolean(module.params['https']),
module.params['subdomains'],
]
for a in module.params['site_apps']:
positional_args.append((a[0], a[1]))
if not module.check_mode:
# If this isn't a dry run, create or modify the site
result.update(
webfaction.create_website(
*positional_args
) if not existing_site else webfaction.update_website(
*positional_args
)
)
elif site_state == 'absent':
# If the site's already not there, nothing changed.
if not existing_site:
module.exit_json(
changed=False,
)
if not module.check_mode:
# If this isn't a dry run, delete the site
result.update(
webfaction.delete_website(session_id, site_name, site_ip)
)
else:
module.fail_json(msg="Unknown state specified: {0}".format(site_state))
module.exit_json(
changed=True,
result=result
)
if __name__ == '__main__':
main()
| gpl-3.0 |
faegi/mapproxy | mapproxy/seed/cachelock.py | 5 | 3758 | # This file is part of the MapProxy project.
# Copyright (C) 2012 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import errno
import os
import sqlite3
import time
from contextlib import contextmanager
class CacheLockedError(Exception):
pass
class CacheLocker(object):
def __init__(self, lockfile, polltime=0.1):
self.lockfile = lockfile
self.polltime = polltime
self._initialize_lockfile()
def _initialize_lockfile(self):
db = sqlite3.connect(self.lockfile)
db.execute("""
CREATE TABLE IF NOT EXISTS cache_locks (
cache_name TEXT NOT NULL,
created REAL NOT NULL,
                pid INTEGER NOT NULL
);
""")
db.commit()
db.close()
@contextmanager
def _exclusive_db_cursor(self):
db = sqlite3.connect(self.lockfile, isolation_level="EXCLUSIVE")
db.row_factory = sqlite3.Row
cur = db.cursor()
try:
yield cur
finally:
db.commit()
db.close()
@contextmanager
def lock(self, cache_name, no_block=False):
pid = os.getpid()
while True:
with self._exclusive_db_cursor() as cur:
self._add_lock(cur, cache_name, pid)
if self._poll(cur, cache_name, pid):
break
elif no_block:
raise CacheLockedError()
time.sleep(self.polltime)
try:
yield
finally:
with self._exclusive_db_cursor() as cur:
self._remove_lock(cur, cache_name, pid)
def _poll(self, cur, cache_name, pid):
active_locks = False
cur.execute("SELECT * from cache_locks where cache_name = ? ORDER BY created", (cache_name, ))
for lock in cur:
if not active_locks and lock['cache_name'] == cache_name and lock['pid'] == pid:
                # we are waiting and it is our turn
return True
if not is_running(lock['pid']):
self._remove_lock(cur, lock['cache_name'], lock['pid'])
else:
active_locks = True
return not active_locks
def _add_lock(self, cur, cache_name, pid):
cur.execute("SELECT count(*) from cache_locks WHERE cache_name = ? AND pid = ?", (cache_name, pid))
if cur.fetchone()[0] == 0:
cur.execute("INSERT INTO cache_locks (cache_name, pid, created) VALUES (?, ?, ?)", (cache_name, pid, time.time()))
def _remove_lock(self, cur, cache_name, pid):
cur.execute("DELETE FROM cache_locks WHERE cache_name = ? AND pid = ?", (cache_name, pid))
class DummyCacheLocker(object):
@contextmanager
def lock(self, cache_name, no_block=False):
yield
def is_running(pid):
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
return False
elif err.errno == errno.EPERM:
return True
else:
raise err
else:
return True
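# Hedged usage sketch (added for illustration; not part of the original
# module): how a seeding process might guard per-cache work with CacheLocker.
# The lock file path and cache name are placeholders, and the body of the
# with-block stands in for the real seeding work.
def example_locked_seed(lockfile='/tmp/mapproxy_seed.lck', cache_name='osm_cache'):
    locker = CacheLocker(lockfile)
    try:
        with locker.lock(cache_name, no_block=True):
            pass # seed/cleanup tasks for this cache would run here
    except CacheLockedError:
        pass # another process already holds the lock for this cache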
if __name__ == '__main__':
locker = CacheLocker('/tmp/cachelock_test')
with locker.lock('foo'):
pass | apache-2.0 |
M4sse/chromium.src | third_party/closure_linter/closure_linter/closurizednamespacesinfo_test.py | 109 | 23362 | #!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for ClosurizedNamespacesInfo."""
import unittest as googletest
from closure_linter import aliaspass
from closure_linter import closurizednamespacesinfo
from closure_linter import ecmametadatapass
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokens
from closure_linter import testutil
from closure_linter import tokenutil
# pylint: disable=g-bad-name
TokenType = javascripttokens.JavaScriptTokenType
class ClosurizedNamespacesInfoTest(googletest.TestCase):
"""Tests for ClosurizedNamespacesInfo."""
_test_cases = {
'goog.global.anything': None,
'package.CONSTANT': 'package',
'package.methodName': 'package',
'package.subpackage.methodName': 'package.subpackage',
'package.subpackage.methodName.apply': 'package.subpackage',
'package.ClassName.something': 'package.ClassName',
'package.ClassName.Enum.VALUE.methodName': 'package.ClassName',
'package.ClassName.CONSTANT': 'package.ClassName',
'package.namespace.CONSTANT.methodName': 'package.namespace',
'package.ClassName.inherits': 'package.ClassName',
'package.ClassName.apply': 'package.ClassName',
'package.ClassName.methodName.apply': 'package.ClassName',
'package.ClassName.methodName.call': 'package.ClassName',
'package.ClassName.prototype.methodName': 'package.ClassName',
'package.ClassName.privateMethod_': 'package.ClassName',
'package.className.privateProperty_': 'package.className',
'package.className.privateProperty_.methodName': 'package.className',
'package.ClassName.PrivateEnum_': 'package.ClassName',
'package.ClassName.prototype.methodName.apply': 'package.ClassName',
'package.ClassName.property.subProperty': 'package.ClassName',
'package.className.prototype.something.somethingElse': 'package.className'
}
def testGetClosurizedNamespace(self):
"""Tests that the correct namespace is returned for various identifiers."""
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=['package'], ignored_extra_namespaces=[])
for identifier, expected_namespace in self._test_cases.items():
actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)
self.assertEqual(
expected_namespace,
actual_namespace,
'expected namespace "' + str(expected_namespace) +
'" for identifier "' + str(identifier) + '" but was "' +
str(actual_namespace) + '"')
def testIgnoredExtraNamespaces(self):
"""Tests that ignored_extra_namespaces are ignored."""
token = self._GetRequireTokens('package.Something')
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=['package'],
ignored_extra_namespaces=['package.Something'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should be valid since it is in ignored namespaces.')
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be invalid since it is not in ignored namespaces.')
def testIsExtraProvide_created(self):
"""Tests that provides for created namespaces are not extra."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is created.')
def testIsExtraProvide_createdIdentifier(self):
"""Tests that provides for created identifiers are not extra."""
input_lines = [
'goog.provide(\'package.Foo.methodName\');',
'package.Foo.methodName = function() {};'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is created.')
def testIsExtraProvide_notCreated(self):
"""Tests that provides for non-created namespaces are extra."""
input_lines = ['goog.provide(\'package.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is not created.')
def testIsExtraProvide_duplicate(self):
"""Tests that providing a namespace twice makes the second one extra."""
input_lines = [
'goog.provide(\'package.Foo\');',
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
# Advance to the second goog.provide token.
token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is already provided.')
def testIsExtraProvide_notClosurized(self):
"""Tests that provides of non-closurized namespaces are not extra."""
input_lines = ['goog.provide(\'notclosurized.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is not closurized.')
def testIsExtraRequire_used(self):
"""Tests that requires for used namespaces are not extra."""
input_lines = [
'goog.require(\'package.Foo\');',
'var x = package.Foo.methodName();'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is used.')
def testIsExtraRequire_usedIdentifier(self):
"""Tests that requires for used methods on classes are extra."""
input_lines = [
'goog.require(\'package.Foo.methodName\');',
'var x = package.Foo.methodName();'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should require the package, not the method specifically.')
def testIsExtraRequire_notUsed(self):
"""Tests that requires for unused namespaces are extra."""
input_lines = ['goog.require(\'package.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be extra since it is not used.')
def testIsExtraRequire_notClosurized(self):
"""Tests that requires of non-closurized namespaces are not extra."""
input_lines = ['goog.require(\'notclosurized.Foo\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is not closurized.')
def testIsExtraRequire_objectOnClass(self):
"""Tests that requiring an object on a class is extra."""
input_lines = [
'goog.require(\'package.Foo.Enum\');',
'var x = package.Foo.Enum.VALUE1;',
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
                    'The whole class, not the object, should be required.')
def testIsExtraRequire_constantOnClass(self):
"""Tests that requiring a constant on a class is extra."""
input_lines = [
'goog.require(\'package.Foo.CONSTANT\');',
'var x = package.Foo.CONSTANT',
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsExtraRequire(token),
                    'The class, not the constant, should be required.')
def testIsExtraRequire_constantNotOnClass(self):
"""Tests that requiring a constant not on a class is OK."""
input_lines = [
'goog.require(\'package.subpackage.CONSTANT\');',
'var x = package.subpackage.CONSTANT',
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Constants can be required except on classes.')
def testIsExtraRequire_methodNotOnClass(self):
"""Tests that requiring a method not on a class is OK."""
input_lines = [
'goog.require(\'package.subpackage.method\');',
'var x = package.subpackage.method()',
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Methods can be required except on classes.')
def testIsExtraRequire_defaults(self):
"""Tests that there are no warnings about extra requires for test utils"""
input_lines = ['goog.require(\'goog.testing.jsunit\');']
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['goog'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is for testing.')
def testGetMissingProvides_provided(self):
"""Tests that provided functions don't cause a missing provide."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
namespaces_info = self._GetNamespacesInfoForScript(
input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_providedIdentifier(self):
"""Tests that provided identifiers don't cause a missing provide."""
input_lines = [
'goog.provide(\'package.Foo.methodName\');',
'package.Foo.methodName = function() {};'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_providedParentIdentifier(self):
"""Tests that provided identifiers on a class don't cause a missing provide
on objects attached to that class."""
input_lines = [
'goog.provide(\'package.foo.ClassName\');',
'package.foo.ClassName.methodName = function() {};',
'package.foo.ClassName.ObjectName = 1;',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_unprovided(self):
"""Tests that unprovided functions cause a missing provide."""
input_lines = ['package.Foo = function() {};']
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_provides = namespaces_info.GetMissingProvides()
self.assertEquals(1, len(missing_provides))
missing_provide = missing_provides.popitem()
self.assertEquals('package.Foo', missing_provide[0])
self.assertEquals(1, missing_provide[1])
def testGetMissingProvides_privatefunction(self):
"""Tests that unprovided private functions don't cause a missing provide."""
input_lines = ['package.Foo_ = function() {};']
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_required(self):
"""Tests that required namespaces don't cause a missing provide."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName = function() {};'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_required(self):
"""Tests that required namespaces don't cause a missing require."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_requiredIdentifier(self):
"""Tests that required namespaces satisfy identifiers on that namespace."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_requiredParentClass(self):
"""Tests that requiring a parent class of an object is sufficient to prevent
a missing require on that object."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName();',
'package.Foo.methodName(package.Foo.ObjectName);'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
def testGetMissingRequires_unrequired(self):
"""Tests that unrequired namespaces cause a missing require."""
input_lines = ['package.Foo();']
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires = namespaces_info.GetMissingRequires()
self.assertEquals(1, len(missing_requires))
missing_req = missing_requires.popitem()
self.assertEquals('package.Foo', missing_req[0])
self.assertEquals(1, missing_req[1])
def testGetMissingRequires_provided(self):
"""Tests that provided namespaces satisfy identifiers on that namespace."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo.methodName();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
def testGetMissingRequires_created(self):
"""Tests that created namespaces do not satisfy usage of an identifier."""
input_lines = [
'package.Foo = function();',
'package.Foo.methodName();',
'package.Foo.anotherMethodName1();',
'package.Foo.anotherMethodName2();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
missing_requires = namespaces_info.GetMissingRequires()
self.assertEquals(1, len(missing_requires))
missing_require = missing_requires.popitem()
self.assertEquals('package.Foo', missing_require[0])
# Make sure line number of first occurrence is reported
self.assertEquals(2, missing_require[1])
def testGetMissingRequires_createdIdentifier(self):
"""Tests that created identifiers satisfy usage of the identifier."""
input_lines = [
'package.Foo.methodName = function();',
'package.Foo.methodName();'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
def testGetMissingRequires_objectOnClass(self):
"""Tests that we should require a class, not the object on the class."""
input_lines = [
'goog.require(\'package.Foo.Enum\');',
'var x = package.Foo.Enum.VALUE1;',
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['package'])
self.assertEquals(1, len(namespaces_info.GetMissingRequires()),
'The whole class, not the object, should be required.')
def testGetMissingRequires_variableWithSameName(self):
"""Tests that we should not goog.require variables and parameters.
b/5362203 Variables in scope are not missing namespaces.
"""
input_lines = [
'goog.provide(\'Foo\');',
'Foo.A = function();',
'Foo.A.prototype.method = function(ab) {',
' if (ab) {',
' var docs;',
' var lvalue = new Obj();',
' // Variable in scope hence not goog.require here.',
' docs.foo.abc = 1;',
' lvalue.next();',
' }',
' // Since js is function scope this should also not goog.require.',
' docs.foo.func();',
' // Its not a variable in scope hence goog.require.',
' dummy.xyz.reset();',
' return this.method2();',
'};',
'Foo.A.prototype.method1 = function(docs, abcd, xyz) {',
' // Parameter hence not goog.require.',
' docs.nodes.length = 2;',
' lvalue.abc.reset();',
'};'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['Foo',
'docs',
'lvalue',
'dummy'])
missing_requires = namespaces_info.GetMissingRequires()
self.assertEquals(2, len(missing_requires))
self.assertItemsEqual(
{'dummy.xyz': 14,
'lvalue.abc': 20}, missing_requires)
def testIsFirstProvide(self):
"""Tests operation of the isFirstProvide method."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo.methodName();'
]
token, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
input_lines, ['package'])
self.assertTrue(namespaces_info.IsFirstProvide(token))
def testGetWholeIdentifierString(self):
"""Tests that created identifiers satisfy usage of the identifier."""
input_lines = [
'package.Foo.',
' veryLong.',
' identifier;'
]
token = testutil.TokenizeSource(input_lines)
self.assertEquals('package.Foo.veryLong.identifier',
tokenutil.GetIdentifierForToken(token))
self.assertEquals(None,
tokenutil.GetIdentifierForToken(token.next))
def testScopified(self):
"""Tests that a goog.scope call is noticed."""
input_lines = [
'goog.scope(function() {',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
self.assertTrue(namespaces_info._scopified_file)
def testScope_unusedAlias(self):
"""Tests that an used alias symbol doesn't result in a require."""
input_lines = [
'goog.scope(function() {',
'var Event = goog.events.Event;',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires = namespaces_info.GetMissingRequires()
self.assertEquals({}, missing_requires)
def testScope_usedAlias(self):
"""Tests that aliased symbols result in correct requires."""
input_lines = [
'goog.scope(function() {',
'var Event = goog.events.Event;',
'var dom = goog.dom;',
'Event(dom.classes.get);',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_requires = namespaces_info.GetMissingRequires()
self.assertEquals({'goog.dom.classes': 4, 'goog.events.Event': 4},
missing_requires)
def testScope_provides(self):
"""Tests that aliased symbols result in correct provides."""
input_lines = [
'goog.scope(function() {',
'goog.bar = {};',
'var bar = goog.bar;',
'bar.Foo = {};',
'});'
]
namespaces_info = self._GetNamespacesInfoForScript(input_lines, ['goog'])
missing_provides = namespaces_info.GetMissingProvides()
self.assertEquals({'goog.bar.Foo': 4}, missing_provides)
def testSetTestOnlyNamespaces(self):
"""Tests that a namespace in setTestOnly makes it a valid provide."""
namespaces_info = self._GetNamespacesInfoForScript([
'goog.setTestOnly(\'goog.foo.barTest\');'
], ['goog'])
token = self._GetProvideTokens('goog.foo.barTest')
self.assertFalse(namespaces_info.IsExtraProvide(token))
token = self._GetProvideTokens('goog.foo.bazTest')
self.assertTrue(namespaces_info.IsExtraProvide(token))
def testSetTestOnlyComment(self):
"""Ensure a comment in setTestOnly does not cause a created namespace."""
namespaces_info = self._GetNamespacesInfoForScript([
'goog.setTestOnly(\'this is a comment\');'
], ['goog'])
self.assertEquals(
[], namespaces_info._created_namespaces,
'A comment in setTestOnly should not modify created namespaces.')
def _GetNamespacesInfoForScript(self, script, closurized_namespaces=None):
_, namespaces_info = self._GetStartTokenAndNamespacesInfoForScript(
script, closurized_namespaces)
return namespaces_info
def _GetStartTokenAndNamespacesInfoForScript(
self, script, closurized_namespaces):
token = testutil.TokenizeSource(script)
return token, self._GetInitializedNamespacesInfo(
token, closurized_namespaces, [])
def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
ignored_extra_namespaces):
"""Returns a namespaces info initialized with the given token stream."""
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=closurized_namespaces,
ignored_extra_namespaces=ignored_extra_namespaces)
state_tracker = javascriptstatetracker.JavaScriptStateTracker()
ecma_pass = ecmametadatapass.EcmaMetaDataPass()
ecma_pass.Process(token)
alias_pass = aliaspass.AliasPass(closurized_namespaces)
alias_pass.Process(token)
while token:
state_tracker.HandleToken(token, state_tracker.GetLastNonSpaceToken())
namespaces_info.ProcessToken(token, state_tracker)
state_tracker.HandleAfterToken(token)
token = token.next
return namespaces_info
def _GetProvideTokens(self, namespace):
"""Returns a list of tokens for a goog.require of the given namespace."""
line_text = 'goog.require(\'' + namespace + '\');\n'
return testutil.TokenizeSource([line_text])
def _GetRequireTokens(self, namespace):
"""Returns a list of tokens for a goog.require of the given namespace."""
line_text = 'goog.require(\'' + namespace + '\');\n'
return testutil.TokenizeSource([line_text])
if __name__ == '__main__':
googletest.main()
| bsd-3-clause |
donutmonger/youtube-dl | setup.py | 107 | 3233 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os.path
import warnings
import sys
try:
from setuptools import setup
setuptools_available = True
except ImportError:
from distutils.core import setup
setuptools_available = False
try:
# This will create an exe that needs Microsoft Visual C++ 2008
# Redistributable Package
import py2exe
except ImportError:
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
print("Cannot import py2exe", file=sys.stderr)
exit(1)
py2exe_options = {
"bundle_files": 1,
"compressed": 1,
"optimize": 2,
"dist_dir": '.',
"dll_excludes": ['w9xpopen.exe'],
}
py2exe_console = [{
"script": "./youtube_dl/__main__.py",
"dest_base": "youtube-dl",
}]
py2exe_params = {
'console': py2exe_console,
'options': {"py2exe": py2exe_options},
'zipfile': None
}
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
params = py2exe_params
else:
files_spec = [
('etc/bash_completion.d', ['youtube-dl.bash-completion']),
('etc/fish/completions', ['youtube-dl.fish']),
('share/doc/youtube_dl', ['README.txt']),
('share/man/man1', ['youtube-dl.1'])
]
root = os.path.dirname(os.path.abspath(__file__))
data_files = []
for dirname, files in files_spec:
resfiles = []
for fn in files:
if not os.path.exists(fn):
warnings.warn('Skipping file %s since it is not present. Type make to build all automatically generated files.' % fn)
else:
resfiles.append(fn)
data_files.append((dirname, resfiles))
params = {
'data_files': data_files,
}
if setuptools_available:
params['entry_points'] = {'console_scripts': ['youtube-dl = youtube_dl:main']}
else:
params['scripts'] = ['bin/youtube-dl']
# Get the version from youtube_dl/version.py without importing the package
exec(compile(open('youtube_dl/version.py').read(),
'youtube_dl/version.py', 'exec'))
setup(
name='youtube_dl',
version=__version__,
description='YouTube video downloader',
long_description='Small command-line program to download videos from'
' YouTube.com and other video sites.',
url='https://github.com/rg3/youtube-dl',
author='Ricardo Garcia',
author_email='[email protected]',
maintainer='Philipp Hagemeister',
maintainer_email='[email protected]',
packages=[
'youtube_dl',
'youtube_dl.extractor', 'youtube_dl.downloader',
'youtube_dl.postprocessor'],
# Provokes warning on most systems (why?!)
# test_suite = 'nose.collector',
# test_requires = ['nosetest'],
classifiers=[
"Topic :: Multimedia :: Video",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"License :: Public Domain",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
],
**params
)
| unlicense |
EricCline/CEM_inc | env/lib/python2.7/site-packages/IPython/html/widgets/tests/test_interaction.py | 2 | 12804 | """Test interact and interactive."""
#-----------------------------------------------------------------------------
# Copyright (C) 2014 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
from collections import OrderedDict
import nose.tools as nt
import IPython.testing.tools as tt
# from IPython.core.getipython import get_ipython
from IPython.html import widgets
from IPython.html.widgets import interact, interactive, Widget, interaction
from IPython.utils.py3compat import annotate
#-----------------------------------------------------------------------------
# Utility stuff
#-----------------------------------------------------------------------------
class DummyComm(object):
comm_id = 'a-b-c-d'
def send(self, *args, **kwargs):
pass
def close(self, *args, **kwargs):
pass
_widget_attrs = {}
displayed = []
def setup():
_widget_attrs['comm'] = Widget.comm
Widget.comm = DummyComm()
_widget_attrs['_ipython_display_'] = Widget._ipython_display_
def raise_not_implemented(*args, **kwargs):
raise NotImplementedError()
Widget._ipython_display_ = raise_not_implemented
def teardown():
for attr, value in _widget_attrs.items():
setattr(Widget, attr, value)
def f(**kwargs):
pass
def clear_display():
global displayed
displayed = []
def record_display(*args):
displayed.extend(args)
#-----------------------------------------------------------------------------
# Actual tests
#-----------------------------------------------------------------------------
def check_widget(w, **d):
"""Check a single widget against a dict"""
for attr, expected in d.items():
if attr == 'cls':
nt.assert_is(w.__class__, expected)
else:
value = getattr(w, attr)
nt.assert_equal(value, expected,
"%s.%s = %r != %r" % (w.__class__.__name__, attr, value, expected)
)
def check_widgets(container, **to_check):
"""Check that widgets are created as expected"""
# build a widget dictionary, so it matches
widgets = {}
for w in container.children:
widgets[w.description] = w
for key, d in to_check.items():
nt.assert_in(key, widgets)
check_widget(widgets[key], **d)
def test_single_value_string():
a = u'hello'
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.TextWidget,
description='a',
value=a,
)
def test_single_value_bool():
for a in (True, False):
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.CheckboxWidget,
description='a',
value=a,
)
def test_single_value_dict():
for d in [
dict(a=5),
dict(a=5, b='b', c=dict),
]:
c = interactive(f, d=d)
w = c.children[0]
check_widget(w,
cls=widgets.DropdownWidget,
description='d',
values=d,
value=next(iter(d.values())),
)
def test_single_value_float():
for a in (2.25, 1.0, -3.5):
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.FloatSliderWidget,
description='a',
value=a,
min= -a if a > 0 else 3*a,
max= 3*a if a > 0 else -a,
step=0.1,
readout=True,
)
def test_single_value_int():
for a in (1, 5, -3):
c = interactive(f, a=a)
nt.assert_equal(len(c.children), 1)
w = c.children[0]
check_widget(w,
cls=widgets.IntSliderWidget,
description='a',
value=a,
min= -a if a > 0 else 3*a,
max= 3*a if a > 0 else -a,
step=1,
readout=True,
)
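# For numeric tuples/lists, interact treats 2 elements as (min, max) and 3 as
# (min, max, step) for a slider; min must be below max and step must be
# positive, which is why the ValueError cases below are expected to raise.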
def test_list_tuple_2_int():
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,1))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,-1))
for min, max in [ (0,1), (1,10), (1,2), (-5,5), (-20,-19) ]:
c = interactive(f, tup=(min, max), lis=[min, max])
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.IntSliderWidget,
min=min,
max=max,
step=1,
readout=True,
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_3_int():
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,2,0))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,2,-1))
for min, max, step in [ (0,2,1), (1,10,2), (1,100,2), (-5,5,4), (-100,-20,4) ]:
c = interactive(f, tup=(min, max, step), lis=[min, max, step])
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.IntSliderWidget,
min=min,
max=max,
step=step,
readout=True,
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_2_float():
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1.0,1.0))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(0.5,-0.5))
for min, max in [ (0.5, 1.5), (1.1,10.2), (1,2.2), (-5.,5), (-20,-19.) ]:
c = interactive(f, tup=(min, max), lis=[min, max])
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.FloatSliderWidget,
min=min,
max=max,
step=.1,
readout=True,
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_3_float():
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,2,0.0))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(-1,-2,1.))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,2.,-1.))
for min, max, step in [ (0.,2,1), (1,10.,2), (1,100,2.), (-5.,5.,4), (-100,-20.,4.) ]:
c = interactive(f, tup=(min, max, step), lis=[min, max, step])
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.FloatSliderWidget,
min=min,
max=max,
step=step,
readout=True,
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_str():
values = ['hello', 'there', 'guy']
first = values[0]
dvalues = OrderedDict((v,v) for v in values)
c = interactive(f, tup=tuple(values), lis=list(values))
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.DropdownWidget,
value=first,
values=dvalues
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_invalid():
for bad in [
(),
(5, 'hi'),
('hi', 5),
({},),
(None,),
]:
with nt.assert_raises(ValueError):
print(bad) # because there is no custom message in assert_raises
c = interactive(f, tup=bad)
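# The @annotate helper attaches Python-3-style annotations to a function under
# Python 2; interact/interactive use them (together with keyword defaults) to
# choose widget classes and initial values in the tests below.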
def test_defaults():
@annotate(n=10)
def f(n, f=4.5, g=1):
pass
c = interactive(f)
check_widgets(c,
n=dict(
cls=widgets.IntSliderWidget,
value=10,
),
f=dict(
cls=widgets.FloatSliderWidget,
value=4.5,
),
g=dict(
cls=widgets.IntSliderWidget,
value=1,
),
)
def test_default_values():
@annotate(n=10, f=(0, 10.), g=5, h={'a': 1, 'b': 2}, j=['hi', 'there'])
def f(n, f=4.5, g=1, h=2, j='there'):
pass
c = interactive(f)
check_widgets(c,
n=dict(
cls=widgets.IntSliderWidget,
value=10,
),
f=dict(
cls=widgets.FloatSliderWidget,
value=4.5,
),
g=dict(
cls=widgets.IntSliderWidget,
value=5,
),
h=dict(
cls=widgets.DropdownWidget,
values={'a': 1, 'b': 2},
value=2
),
j=dict(
cls=widgets.DropdownWidget,
values={'hi':'hi', 'there':'there'},
value='there'
),
)
def test_default_out_of_bounds():
@annotate(f=(0, 10.), h={'a': 1}, j=['hi', 'there'])
def f(f='hi', h=5, j='other'):
pass
c = interactive(f)
check_widgets(c,
f=dict(
cls=widgets.FloatSliderWidget,
value=5.,
),
h=dict(
cls=widgets.DropdownWidget,
values={'a': 1},
value=1,
),
j=dict(
cls=widgets.DropdownWidget,
values={'hi':'hi', 'there':'there'},
value='hi',
),
)
def test_annotations():
@annotate(n=10, f=widgets.FloatTextWidget())
def f(n, f):
pass
c = interactive(f)
check_widgets(c,
n=dict(
cls=widgets.IntSliderWidget,
value=10,
),
f=dict(
cls=widgets.FloatTextWidget,
),
)
def test_priority():
@annotate(annotate='annotate', kwarg='annotate')
def f(kwarg='default', annotate='default', default='default'):
pass
c = interactive(f, kwarg='kwarg')
check_widgets(c,
kwarg=dict(
cls=widgets.TextWidget,
value='kwarg',
),
annotate=dict(
cls=widgets.TextWidget,
value='annotate',
),
)
@nt.with_setup(clear_display)
def test_decorator_kwarg():
with tt.monkeypatch(interaction, 'display', record_display):
@interact(a=5)
def foo(a):
pass
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.IntSliderWidget,
value=5,
)
@nt.with_setup(clear_display)
def test_decorator_no_call():
with tt.monkeypatch(interaction, 'display', record_display):
@interact
def foo(a='default'):
pass
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.TextWidget,
value='default',
)
@nt.with_setup(clear_display)
def test_call_interact():
def foo(a='default'):
pass
with tt.monkeypatch(interaction, 'display', record_display):
ifoo = interact(foo)
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.TextWidget,
value='default',
)
@nt.with_setup(clear_display)
def test_call_interact_kwargs():
def foo(a='default'):
pass
with tt.monkeypatch(interaction, 'display', record_display):
ifoo = interact(foo, a=10)
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.IntSliderWidget,
value=10,
)
@nt.with_setup(clear_display)
def test_call_decorated_on_trait_change():
"""test calling @interact decorated functions"""
d = {}
with tt.monkeypatch(interaction, 'display', record_display):
@interact
def foo(a='default'):
d['a'] = a
return a
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.TextWidget,
value='default',
)
# test calling the function directly
a = foo('hello')
nt.assert_equal(a, 'hello')
nt.assert_equal(d['a'], 'hello')
# test that setting trait values calls the function
w.value = 'called'
nt.assert_equal(d['a'], 'called')
@nt.with_setup(clear_display)
def test_call_decorated_kwargs_on_trait_change():
"""test calling @interact(foo=bar) decorated functions"""
d = {}
with tt.monkeypatch(interaction, 'display', record_display):
@interact(a='kwarg')
def foo(a='default'):
d['a'] = a
return a
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.TextWidget,
value='kwarg',
)
# test calling the function directly
a = foo('hello')
nt.assert_equal(a, 'hello')
nt.assert_equal(d['a'], 'hello')
# test that setting trait values calls the function
w.value = 'called'
nt.assert_equal(d['a'], 'called')
def test_fixed():
c = interactive(f, a=widgets.fixed(5), b='text')
nt.assert_equal(len(c.children), 1)
w = c.children[0]
check_widget(w,
cls=widgets.TextWidget,
value='text',
description='b',
)
| mit |
bussiere/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/test/test_compiler.py | 112 | 11233 | import test.test_support
compiler = test.test_support.import_module('compiler', deprecated=True)
from compiler.ast import flatten
import os, sys, time, unittest
from random import random
from StringIO import StringIO
# How much time in seconds can pass before we print a 'Still working' message.
_PRINT_WORKING_MSG_INTERVAL = 5 * 60
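# Minimal context manager used by the 'with'-statement tests further below
# (testWith and testWithAss).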
class TrivialContext(object):
def __enter__(self):
return self
def __exit__(self, *exc_info):
pass
class CompilerTest(unittest.TestCase):
def testCompileLibrary(self):
# A simple but large test. Compile all the code in the
# standard library and its test suite. This doesn't verify
        # that any of the code is correct, merely that the compiler is able
# to generate some kind of code for it.
next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
        # warning: if 'os' or 'test_support' are moved to some other dir,
# they should be changed here.
libdir = os.path.dirname(os.__file__)
testdir = os.path.dirname(test.test_support.__file__)
for dir in [testdir]:
for basename in "test_os.py",:
# Print still working message since this test can be really slow
if next_time <= time.time():
next_time = time.time() + _PRINT_WORKING_MSG_INTERVAL
print >>sys.__stdout__, \
' testCompileLibrary still working, be patient...'
sys.__stdout__.flush()
if not basename.endswith(".py"):
continue
if not TEST_ALL and random() < 0.98:
continue
path = os.path.join(dir, basename)
if test.test_support.verbose:
print "compiling", path
f = open(path, "U")
buf = f.read()
f.close()
if "badsyntax" in basename or "bad_coding" in basename:
self.assertRaises(SyntaxError, compiler.compile,
buf, basename, "exec")
else:
try:
compiler.compile(buf, basename, "exec")
except Exception, e:
args = list(e.args)
args.append("in file %s]" % basename)
#args[0] += "[in file %s]" % basename
e.args = tuple(args)
raise
def testNewClassSyntax(self):
compiler.compile("class foo():pass\n\n","<string>","exec")
def testYieldExpr(self):
compiler.compile("def g(): yield\n\n", "<string>", "exec")
def testKeywordAfterStarargs(self):
def f(*args, **kwargs):
self.assertEqual((args, kwargs), ((2,3), {'x': 1, 'y': 4}))
c = compiler.compile('f(x=1, *(2, 3), y=4)', '<string>', 'exec')
exec c in {'f': f}
self.assertRaises(SyntaxError, compiler.parse, "foo(a=1, b)")
self.assertRaises(SyntaxError, compiler.parse, "foo(1, *args, 3)")
def testTryExceptFinally(self):
# Test that except and finally clauses in one try stmt are recognized
c = compiler.compile("try:\n 1//0\nexcept:\n e = 1\nfinally:\n f = 1",
"<string>", "exec")
dct = {}
exec c in dct
self.assertEqual(dct.get('e'), 1)
self.assertEqual(dct.get('f'), 1)
def testDefaultArgs(self):
self.assertRaises(SyntaxError, compiler.parse, "def foo(a=1, b): pass")
def testDocstrings(self):
c = compiler.compile('"doc"', '<string>', 'exec')
self.assertIn('__doc__', c.co_names)
c = compiler.compile('def f():\n "doc"', '<string>', 'exec')
g = {}
exec c in g
self.assertEqual(g['f'].__doc__, "doc")
def testLineNo(self):
# Test that all nodes except Module have a correct lineno attribute.
filename = __file__
if filename.endswith((".pyc", ".pyo")):
filename = filename[:-1]
tree = compiler.parseFile(filename)
self.check_lineno(tree)
def check_lineno(self, node):
try:
self._check_lineno(node)
except AssertionError:
print node.__class__, node.lineno
raise
def _check_lineno(self, node):
if not node.__class__ in NOLINENO:
self.assertIsInstance(node.lineno, int,
"lineno=%s on %s" % (node.lineno, node.__class__))
self.assertTrue(node.lineno > 0,
"lineno=%s on %s" % (node.lineno, node.__class__))
for child in node.getChildNodes():
self.check_lineno(child)
def testFlatten(self):
self.assertEqual(flatten([1, [2]]), [1, 2])
self.assertEqual(flatten((1, (2,))), [1, 2])
def testNestedScope(self):
c = compiler.compile('def g():\n'
' a = 1\n'
' def f(): return a + 2\n'
' return f()\n'
'result = g()',
'<string>',
'exec')
dct = {}
exec c in dct
self.assertEqual(dct.get('result'), 3)
def testGenExp(self):
c = compiler.compile('list((i,j) for i in range(3) if i < 3'
' for j in range(4) if j > 2)',
'<string>',
'eval')
self.assertEqual(eval(c), [(0, 3), (1, 3), (2, 3)])
def testSetLiteral(self):
c = compiler.compile('{1, 2, 3}', '<string>', 'eval')
self.assertEqual(eval(c), {1,2,3})
c = compiler.compile('{1, 2, 3,}', '<string>', 'eval')
self.assertEqual(eval(c), {1,2,3})
def testDictLiteral(self):
c = compiler.compile('{1:2, 2:3, 3:4}', '<string>', 'eval')
self.assertEqual(eval(c), {1:2, 2:3, 3:4})
c = compiler.compile('{1:2, 2:3, 3:4,}', '<string>', 'eval')
self.assertEqual(eval(c), {1:2, 2:3, 3:4})
def testSetComp(self):
c = compiler.compile('{x for x in range(1, 4)}', '<string>', 'eval')
self.assertEqual(eval(c), {1, 2, 3})
c = compiler.compile('{x * y for x in range(3) if x != 0'
' for y in range(4) if y != 0}',
'<string>',
'eval')
self.assertEqual(eval(c), {1, 2, 3, 4, 6})
def testDictComp(self):
c = compiler.compile('{x:x+1 for x in range(1, 4)}', '<string>', 'eval')
self.assertEqual(eval(c), {1:2, 2:3, 3:4})
c = compiler.compile('{(x, y) : y for x in range(2) if x != 0'
' for y in range(3) if y != 0}',
'<string>',
'eval')
self.assertEqual(eval(c), {(1, 2): 2, (1, 1): 1})
def testWith(self):
# SF bug 1638243
c = compiler.compile('from __future__ import with_statement\n'
'def f():\n'
' with TrivialContext():\n'
' return 1\n'
'result = f()',
'<string>',
'exec' )
dct = {'TrivialContext': TrivialContext}
exec c in dct
self.assertEqual(dct.get('result'), 1)
def testWithAss(self):
c = compiler.compile('from __future__ import with_statement\n'
'def f():\n'
' with TrivialContext() as tc:\n'
' return 1\n'
'result = f()',
'<string>',
'exec' )
dct = {'TrivialContext': TrivialContext}
exec c in dct
self.assertEqual(dct.get('result'), 1)
def testWithMult(self):
events = []
class Ctx:
def __init__(self, n):
self.n = n
def __enter__(self):
events.append(self.n)
def __exit__(self, *args):
pass
c = compiler.compile('from __future__ import with_statement\n'
'def f():\n'
' with Ctx(1) as tc, Ctx(2) as tc2:\n'
' return 1\n'
'result = f()',
'<string>',
'exec' )
dct = {'Ctx': Ctx}
exec c in dct
self.assertEqual(dct.get('result'), 1)
self.assertEqual(events, [1, 2])
def testGlobal(self):
code = compiler.compile('global x\nx=1', '<string>', 'exec')
d1 = {'__builtins__': {}}
d2 = {}
exec code in d1, d2
# x should be in the globals dict
self.assertEqual(d1.get('x'), 1)
def testPrintFunction(self):
c = compiler.compile('from __future__ import print_function\n'
'print("a", "b", sep="**", end="++", '
'file=output)',
'<string>',
'exec' )
dct = {'output': StringIO()}
exec c in dct
self.assertEqual(dct['output'].getvalue(), 'a**b++')
def _testErrEnc(self, src, text, offset):
try:
compile(src, "", "exec")
except SyntaxError, e:
self.assertEqual(e.offset, offset)
self.assertEqual(e.text, text)
def testSourceCodeEncodingsError(self):
# Test SyntaxError with encoding definition
sjis = "print '\x83\x70\x83\x43\x83\x5c\x83\x93', '\n"
ascii = "print '12345678', '\n"
encdef = "#! -*- coding: ShiftJIS -*-\n"
# ascii source without encdef
self._testErrEnc(ascii, ascii, 19)
# ascii source with encdef
self._testErrEnc(encdef+ascii, ascii, 19)
# non-ascii source with encdef
self._testErrEnc(encdef+sjis, sjis, 19)
# ShiftJIS source without encdef
self._testErrEnc(sjis, sjis, 19)
NOLINENO = (compiler.ast.Module, compiler.ast.Stmt, compiler.ast.Discard)
###############################################################################
# code below is just used to trigger some possible errors, for the benefit of
# testLineNo
###############################################################################
class Toto:
"""docstring"""
pass
a, b = 2, 3
[c, d] = 5, 6
l = [(x, y) for x, y in zip(range(5), range(5,10))]
l[0]
l[3:4]
d = {'a': 2}
d = {}
d = {x: y for x, y in zip(range(5), range(5,10))}
s = {x for x in range(10)}
s = {1}
t = ()
t = (1, 2)
l = []
l = [1, 2]
if l:
pass
else:
a, b = b, a
try:
print yo
except:
yo = 3
else:
yo += 3
try:
a += b
finally:
b = 0
from math import *
###############################################################################
def test_main():
global TEST_ALL
TEST_ALL = test.test_support.is_resource_enabled("cpu")
test.test_support.run_unittest(CompilerTest)
if __name__ == "__main__":
test_main()
| mit |
abartlet/samba-old | selftest/selftest.py | 2 | 16719 | #!/usr/bin/python -u
# Bootstrap Samba and run a number of tests against it.
# Copyright (C) 2005-2012 Jelmer Vernooij <[email protected]>
# Copyright (C) 2007-2009 Stefan Metzmacher <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import atexit
from cStringIO import StringIO
import os
import sys
import signal
import subprocess
from samba import subunit
import traceback
import warnings
import optparse
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from selftest import (
socket_wrapper,
subunithelper,
testlist,
)
from selftest.client import write_clientconf
from selftest.run import (
expand_command_list,
expand_command_run,
exported_envvars_str,
now,
run_testsuite_command,
)
from selftest.target import (
EnvironmentManager,
NoneTarget,
UnsupportedEnvironment,
)
includes = []
excludes = []
def read_excludes(option, opt_str, value, parser):
    excludes.extend(testlist.read_test_regexes(value))
def read_includes(option, opt_str, value, parser):
    includes.extend(testlist.read_test_regexes(value))
parser = optparse.OptionParser("TEST-REGEXES")
parser.add_option("--target", type="choice", choices=["samba", "samba3", "none"], default="samba", help="Samba version to target")
parser.add_option("--quick", help="run quick overall test", action="store_true", default=False)
parser.add_option("--list", help="list available tests", action="store_true", default=False)
parser.add_option("--socket-wrapper", help="enable socket wrapper", action="store_true", default=False)
parser.add_option("--socket-wrapper-pcap", help="save traffic to pcap directories", type="str")
parser.add_option("--socket-wrapper-keep-pcap", help="keep all pcap files, not just those for tests that failed", action="store_true", default=False)
parser.add_option("--one", help="abort when the first test fails", action="store_true", default=False)
parser.add_option("--exclude", action="callback", help="Add file to exclude files", callback=read_excludes)
parser.add_option("--include", action="callback", help="Add file to include files", callback=read_includes)
parser.add_option("--testenv", help="run a shell in the requested test environment", action="store_true", default=False)
parser.add_option("--resetup-environment", help="Re-setup environment", action="store_true", default=False)
parser.add_option("--binary-mapping", help="Map binaries to use", type=str)
parser.add_option("--load-list", help="Load list of tests to load from a file", type=str)
parser.add_option("--prefix", help="prefix to run tests in", type=str, default="./st")
parser.add_option("--srcdir", type=str, default=".", help="source directory")
parser.add_option("--bindir", type=str, default="./bin", help="binaries directory")
parser.add_option("--testlist", type=str, action="append", help="file to read available tests from")
parser.add_option("--ldap", help="back samba onto specified ldap server", choices=["openldap", "fedora-ds"], type="choice")
opts, args = parser.parse_args()
subunit_ops = subunithelper.SubunitOps(sys.stdout)
def handle_signal(sig, frame):
sys.stderr.write("Exiting early because of signal %s.\n" % sig)
sys.exit(1)
for sig in (signal.SIGINT, signal.SIGQUIT, signal.SIGTERM, signal.SIGPIPE):
signal.signal(sig, handle_signal)
def skip(name):
return testlist.find_in_list(excludes, name)
def setup_pcap(name):
if (not opts.socket_wrapper_pcap or
not os.environ.get("SOCKET_WRAPPER_PCAP_DIR")):
return
fname = "".join([x for x in name if x.isalnum() or x == '-'])
pcap_file = os.path.join(
os.environ["SOCKET_WRAPPER_PCAP_DIR"], "%s.pcap" % fname)
socket_wrapper.setup_pcap(pcap_file)
return pcap_file
def cleanup_pcap(pcap_file, exit_code):
if not opts.socket_wrapper_pcap:
return
if opts.socket_wrapper_keep_pcap:
return
    if exit_code == 0:
return
if pcap_file is None:
return
os.unlink(pcap_file)
def run_testsuite(name, cmd, subunit_ops, env=None):
"""Run a single testsuite.
:param env: Environment to run in
:param name: Name of the testsuite
:param cmd: Name of the (fully expanded) command to run
:return: exitcode of the command
"""
pcap_file = setup_pcap(name)
exitcode = run_testsuite_command(name, cmd, subunit_ops, env)
if exitcode is None:
sys.exit(1)
cleanup_pcap(pcap_file, exitcode)
if not opts.socket_wrapper_keep_pcap and pcap_file is not None:
sys.stdout.write("PCAP FILE: %s\n" % pcap_file)
if exitcode != 0 and opts.one:
sys.exit(1)
return exitcode
if opts.list and opts.testenv:
sys.stderr.write("--list and --testenv are mutually exclusive\n")
sys.exit(1)
tests = args
# quick hack to disable rpc validation when using valgrind - it is way too slow
if not os.environ.get("VALGRIND"):
os.environ["VALIDATE"] = "validate"
os.environ["MALLOC_CHECK_"] = "3"
# make all our python scripts unbuffered
os.environ["PYTHONUNBUFFERED"] = "1"
bindir_abs = os.path.abspath(opts.bindir)
# Pick the LDAP backend from --ldap; TEST_LDAP=yes is kept for backwards compatibility.
ldap = opts.ldap
if os.environ.get("TEST_LDAP") == "yes":
    if os.environ.get("FEDORA_DS_ROOT"):
        ldap = "fedora-ds"
    else:
        ldap = "openldap"
torture_maxtime = int(os.getenv("TORTURE_MAXTIME", "1200"))
if ldap:
    # LDAP is slow
    torture_maxtime *= 2
prefix = os.path.normpath(opts.prefix)
# Ensure we have the test prefix around.
#
# We need restrictive permissions on this as some subdirectories in this tree
# will have wider permissions (ie 0777) and this would allow other users on the
# host to subvert the test process.
if not os.path.isdir(prefix):
os.mkdir(prefix, 0700)
else:
os.chmod(prefix, 0700)
prefix_abs = os.path.abspath(prefix)
tmpdir_abs = os.path.abspath(os.path.join(prefix_abs, "tmp"))
if not os.path.isdir(tmpdir_abs):
os.mkdir(tmpdir_abs, 0777)
srcdir_abs = os.path.abspath(opts.srcdir)
if prefix_abs == "/":
raise Exception("using '/' as absolute prefix is a bad idea")
os.environ["PREFIX"] = prefix
os.environ["KRB5CCNAME"] = os.path.join(prefix, "krb5ticket")
os.environ["PREFIX_ABS"] = prefix_abs
os.environ["SRCDIR"] = opts.srcdir
os.environ["SRCDIR_ABS"] = srcdir_abs
os.environ["BINDIR"] = bindir_abs
tls_enabled = not opts.quick
if tls_enabled:
os.environ["TLS_ENABLED"] = "yes"
else:
os.environ["TLS_ENABLED"] = "no"
def prefix_pathvar(name, newpath):
if name in os.environ:
os.environ[name] = "%s:%s" % (newpath, os.environ[name])
else:
os.environ[name] = newpath
prefix_pathvar("PKG_CONFIG_PATH", os.path.join(bindir_abs, "pkgconfig"))
prefix_pathvar("PYTHONPATH", os.path.join(bindir_abs, "python"))
if opts.socket_wrapper_keep_pcap:
# Socket wrapper keep pcap implies socket wrapper pcap
opts.socket_wrapper_pcap = True
if opts.socket_wrapper_pcap:
# Socket wrapper pcap implies socket wrapper
opts.socket_wrapper = True
if opts.socket_wrapper:
socket_wrapper_dir = socket_wrapper.setup_dir(os.path.join(prefix_abs, "w"), opts.socket_wrapper_pcap)
sys.stdout.write("SOCKET_WRAPPER_DIR=%s\n" % socket_wrapper_dir)
elif not opts.list:
if os.getuid() != 0:
warnings.warn("not using socket wrapper, but also not running as root. Will not be able to listen on proper ports")
testenv_default = "none"
if opts.binary_mapping:
binary_mapping = dict([l.split(":") for l in opts.binary_mapping.split(",")])
os.environ["BINARY_MAPPING"] = opts.binary_mapping
else:
binary_mapping = {}
os.environ["BINARY_MAPPING"] = ""
# After this many seconds, the server will self-terminate. All tests
# must terminate in this time, and testenv will only stay alive this
# long
if os.environ.get("SMBD_MAXTIME", ""):
server_maxtime = int(os.environ["SMBD_MAXTIME"])
else:
server_maxtime = 7500
def has_socket_wrapper(bindir):
    """Check if Samba has been built with socket wrapper support.
    """
    # "smbd -b" prints the build options; look for SOCKET_WRAPPER in its output.
    output = subprocess.check_output([os.path.join(bindir, "smbd"), "-b"])
    for line in output.splitlines():
        if "SOCKET_WRAPPER" in line:
            return True
    return False
if not opts.list:
if opts.target == "samba":
if opts.socket_wrapper and not has_socket_wrapper(opts.bindir):
sys.stderr.write("You must include --enable-socket-wrapper when compiling Samba in order to execute 'make test'. Exiting....\n")
sys.exit(1)
testenv_default = "ad_dc_ntvfs"
from selftest.target.samba import Samba
target = Samba(opts.bindir, binary_mapping, ldap, opts.srcdir, server_maxtime)
elif opts.target == "samba3":
if opts.socket_wrapper and not has_socket_wrapper(opts.bindir):
sys.stderr.write("You must include --enable-socket-wrapper when compiling Samba in order to execute 'make test'. Exiting....\n")
sys.exit(1)
testenv_default = "member"
from selftest.target.samba3 import Samba3
target = Samba3(opts.bindir, binary_mapping, srcdir_abs, server_maxtime)
elif opts.target == "none":
testenv_default = "none"
target = NoneTarget()
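    # The EnvironmentManager owns every test environment set up below; its
    # teardown_all is registered with atexit so environments are stopped even
    # if selftest exits early.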
env_manager = EnvironmentManager(target)
atexit.register(env_manager.teardown_all)
interfaces = ",".join([
"127.0.0.11/8",
"127.0.0.12/8",
"127.0.0.13/8",
"127.0.0.14/8",
"127.0.0.15/8",
"127.0.0.16/8"])
clientdir = os.path.join(prefix_abs, "client")
conffile = os.path.join(clientdir, "client.conf")
os.environ["SMB_CONF_PATH"] = conffile
todo = []
if not opts.testlist:
sys.stderr.write("No testlists specified\n")
sys.exit(1)
os.environ["SELFTEST_PREFIX"] = prefix_abs
os.environ["SELFTEST_TMPDIR"] = tmpdir_abs
os.environ["TEST_DATA_PREFIX"] = tmpdir_abs
if opts.socket_wrapper:
os.environ["SELFTEST_INTERFACES"] = interfaces
else:
os.environ["SELFTEST_INTERFACES"] = ""
if opts.quick:
os.environ["SELFTEST_QUICK"] = "1"
else:
os.environ["SELFTEST_QUICK"] = ""
os.environ["SELFTEST_MAXTIME"] = str(torture_maxtime)
available = []
for fn in opts.testlist:
for testsuite in testlist.read_testlist_file(fn):
if not testlist.should_run_test(tests, testsuite):
continue
name = testsuite[0]
if (includes is not None and
testlist.find_in_list(includes, name) is not None):
continue
available.append(testsuite)
if opts.load_list:
restricted_mgr = testlist.RestrictedTestManager.from_path(opts.load_list)
else:
restricted_mgr = None
for testsuite in available:
name = testsuite[0]
skipreason = skip(name)
if restricted_mgr is not None:
match = restricted_mgr.should_run_testsuite(name)
if match == []:
continue
else:
match = None
if skipreason is not None:
if not opts.list:
subunit_ops.skip_testsuite(name, skipreason)
else:
todo.append(testsuite + (match,))
if restricted_mgr is not None:
for name in restricted_mgr.iter_unused():
sys.stdout.write("No test or testsuite found matching %s\n" % name)
if todo == []:
sys.stderr.write("No tests to run\n")
sys.exit(1)
suitestotal = len(todo)
if not opts.list:
subunit_ops.progress(suitestotal, subunit.PROGRESS_SET)
subunit_ops.time(now())
exported_envvars = [
# domain stuff
"DOMAIN",
"REALM",
# domain controller stuff
"DC_SERVER",
"DC_SERVER_IP",
"DC_NETBIOSNAME",
"DC_NETBIOSALIAS",
# domain member
"MEMBER_SERVER",
"MEMBER_SERVER_IP",
"MEMBER_NETBIOSNAME",
"MEMBER_NETBIOSALIAS",
# rpc proxy controller stuff
"RPC_PROXY_SERVER",
"RPC_PROXY_SERVER_IP",
"RPC_PROXY_NETBIOSNAME",
"RPC_PROXY_NETBIOSALIAS",
# domain controller stuff for Vampired DC
"VAMPIRE_DC_SERVER",
"VAMPIRE_DC_SERVER_IP",
"VAMPIRE_DC_NETBIOSNAME",
"VAMPIRE_DC_NETBIOSALIAS",
# domain controller stuff for Vampired DC
"PROMOTED_DC_SERVER",
"PROMOTED_DC_SERVER_IP",
"PROMOTED_DC_NETBIOSNAME",
"PROMOTED_DC_NETBIOSALIAS",
# server stuff
"SERVER",
"SERVER_IP",
"NETBIOSNAME",
"NETBIOSALIAS",
# user stuff
"USERNAME",
"USERID",
"PASSWORD",
"DC_USERNAME",
"DC_PASSWORD",
# misc stuff
"KRB5_CONFIG",
"WINBINDD_SOCKET_DIR",
"WINBINDD_PRIV_PIPE_DIR",
"NMBD_SOCKET_DIR",
"LOCAL_PATH"
]
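# switch_env sets up (or reuses) the named test environment and exports its
# variables; a "name:local" suffix points SMB_CONF_PATH at the server's own
# configuration, while the default "name:client" writes a client smb.conf.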
def switch_env(name, prefix):
if ":" in name:
(envname, option) = name.split(":", 1)
else:
envname = name
option = "client"
env = env_manager.setup_env(envname, prefix)
testenv_vars = env.get_vars()
if option == "local":
socket_wrapper.set_default_iface(testenv_vars["SOCKET_WRAPPER_DEFAULT_IFACE"])
os.environ["SMB_CONF_PATH"] = testenv_vars["SERVERCONFFILE"]
elif option == "client":
socket_wrapper.set_default_iface(11)
write_clientconf(conffile, clientdir, testenv_vars)
os.environ["SMB_CONF_PATH"] = conffile
else:
raise Exception("Unknown option[%s] for envname[%s]" % (option,
envname))
for name in exported_envvars:
if name in testenv_vars:
os.environ[name] = testenv_vars[name]
elif name in os.environ:
del os.environ[name]
return env
# This 'global' file needs to be empty when we start
dns_host_file_path = os.path.join(prefix_abs, "dns_host_file")
if os.path.exists(dns_host_file_path):
os.unlink(dns_host_file_path)
if opts.testenv:
testenv_name = os.environ.get("SELFTEST_TESTENV", testenv_default)
env = switch_env(testenv_name, prefix)
testenv_vars = env.get_vars()
os.environ["PIDDIR"] = testenv_vars["PIDDIR"]
os.environ["ENVNAME"] = testenv_name
envvarstr = exported_envvars_str(testenv_vars, exported_envvars)
term = os.environ.get("TERMINAL", "xterm -e")
cmd = """'echo -e "
Welcome to the Samba4 Test environment '%(testenv_name)'
This matches the client environment used in make test
server is pid `cat \$PIDDIR/samba.pid`
Some useful environment variables:
TORTURE_OPTIONS=\$TORTURE_OPTIONS
SMB_CONF_PATH=\$SMB_CONF_PATH
$envvarstr
\" && LD_LIBRARY_PATH=%(LD_LIBRARY_PATH)s $(SHELL)'""" % {
"testenv_name": testenv_name,
"LD_LIBRARY_PATH": os.environ["LD_LIBRARY_PATH"]}
subprocess.call(term + ' ' + cmd, shell=True)
env_manager.teardown_env(testenv_name)
elif opts.list:
for (name, envname, cmd, supports_loadfile, supports_idlist, subtests) in todo:
cmd = expand_command_list(cmd)
if cmd is None:
warnings.warn("Unable to list tests in %s" % name)
continue
exitcode = subprocess.call(cmd, shell=True)
if exitcode != 0:
sys.stderr.write("%s exited with exit code %s\n" % (cmd, exitcode))
sys.exit(1)
else:
for (name, envname, cmd, supports_loadfile, supports_idlist, subtests) in todo:
try:
env = switch_env(envname, prefix)
except UnsupportedEnvironment:
subunit_ops.start_testsuite(name)
subunit_ops.end_testsuite(name, "skip",
"environment %s is unknown in this test backend - skipping" % envname)
continue
except Exception, e:
subunit_ops.start_testsuite(name)
traceback.print_exc()
subunit_ops.end_testsuite(name, "error",
"unable to set up environment %s: %s" % (envname, e))
continue
cmd, tmpf = expand_command_run(cmd, supports_loadfile, supports_idlist,
subtests)
run_testsuite(name, cmd, subunit_ops, env=env)
if tmpf is not None:
os.remove(tmpf)
if opts.resetup_environment:
env_manager.teardown_env(envname)
env_manager.teardown_all()
sys.stdout.write("\n")
# if there were any valgrind failures, show them
for fn in os.listdir(prefix):
if fn.startswith("valgrind.log"):
sys.stdout.write("VALGRIND FAILURE\n")
f = open(os.path.join(prefix, fn), 'r')
try:
sys.stdout.write(f.read())
finally:
f.close()
sys.exit(0)
| gpl-3.0 |
mattclark/osf.io | osf_tests/test_search_views.py | 6 | 15195 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
from nose.tools import * # noqa: F403
from osf_tests import factories
from tests.base import OsfTestCase
from website.util import api_url_for
from website.views import find_bookmark_collection
@pytest.mark.enable_search
@pytest.mark.enable_enqueue_task
class TestSearchViews(OsfTestCase):
def setUp(self):
super(TestSearchViews, self).setUp()
import website.search.search as search
search.delete_all()
robbie = factories.UserFactory(fullname='Robbie Williams')
self.project = factories.ProjectFactory(creator=robbie)
self.contrib = factories.UserFactory(fullname='Brian May')
for i in range(0, 12):
factories.UserFactory(fullname='Freddie Mercury{}'.format(i))
self.user_one = factories.AuthUserFactory()
self.user_two = factories.AuthUserFactory()
self.project_private_user_one = factories.ProjectFactory(title='aaa', creator=self.user_one, is_public=False)
self.project_private_user_two = factories.ProjectFactory(title='aaa', creator=self.user_two, is_public=False)
self.project_public_user_one = factories.ProjectFactory(title='aaa', creator=self.user_one, is_public=True)
self.project_public_user_two = factories.ProjectFactory(title='aaa', creator=self.user_two, is_public=True)
def tearDown(self):
super(TestSearchViews, self).tearDown()
import website.search.search as search
search.delete_all()
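    # One broad test that exercises the contributor-search, pagination,
    # node-search and user-search endpoints; each sub-case is marked by the
    # inline comments below.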
def test_search_views(self):
#Test search contributor
url = api_url_for('search_contributor')
res = self.app.get(url, {'query': self.contrib.fullname})
assert_equal(res.status_code, 200)
result = res.json['users']
assert_equal(len(result), 1)
brian = result[0]
assert_equal(brian['fullname'], self.contrib.fullname)
assert_in('profile_image_url', brian)
assert_equal(brian['registered'], self.contrib.is_registered)
assert_equal(brian['active'], self.contrib.is_active)
#Test search pagination
res = self.app.get(url, {'query': 'fr'})
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(pages, 3)
assert_equal(page, 0)
#Test default page 1
res = self.app.get(url, {'query': 'fr', 'page': 1})
assert_equal(res.status_code, 200)
result = res.json['users']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(page, 1)
#Test default page 2
res = self.app.get(url, {'query': 'fr', 'page': 2})
assert_equal(res.status_code, 200)
result = res.json['users']
page = res.json['page']
assert_equal(len(result), 4)
assert_equal(page, 2)
#Test smaller pages
res = self.app.get(url, {'query': 'fr', 'size': 5})
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 5)
assert_equal(page, 0)
assert_equal(pages, 3)
#Test smaller pages page 2
res = self.app.get(url, {'query': 'fr', 'page': 2, 'size': 5, })
assert_equal(res.status_code, 200)
result = res.json['users']
pages = res.json['pages']
page = res.json['page']
assert_equal(len(result), 4)
assert_equal(page, 2)
assert_equal(pages, 3)
#Test search projects
url = '/search/'
res = self.app.get(url, {'q': self.project.title})
assert_equal(res.status_code, 200)
#Test search node
res = self.app.post_json(
api_url_for('search_node'),
{'query': self.project.title},
auth=factories.AuthUserFactory().auth
)
assert_equal(res.status_code, 200)
#Test search node includePublic true
res = self.app.post_json(
api_url_for('search_node'),
{'query': 'a', 'includePublic': True},
auth=self.user_one.auth
)
node_ids = [node['id'] for node in res.json['nodes']]
assert_in(self.project_private_user_one._id, node_ids)
assert_in(self.project_public_user_one._id, node_ids)
assert_in(self.project_public_user_two._id, node_ids)
assert_not_in(self.project_private_user_two._id, node_ids)
#Test search node includePublic false
res = self.app.post_json(
api_url_for('search_node'),
{'query': 'a', 'includePublic': False},
auth=self.user_one.auth
)
node_ids = [node['id'] for node in res.json['nodes']]
assert_in(self.project_private_user_one._id, node_ids)
assert_in(self.project_public_user_one._id, node_ids)
assert_not_in(self.project_public_user_two._id, node_ids)
assert_not_in(self.project_private_user_two._id, node_ids)
#Test search user
url = '/api/v1/search/user/'
res = self.app.get(url, {'q': 'Umwali'})
assert_equal(res.status_code, 200)
assert_false(res.json['results'])
user_one = factories.AuthUserFactory(fullname='Joe Umwali')
user_two = factories.AuthUserFactory(fullname='Joan Uwase')
res = self.app.get(url, {'q': 'Umwali'})
assert_equal(res.status_code, 200)
assert_equal(len(res.json['results']), 1)
assert_false(res.json['results'][0]['social'])
user_one.social = {
'github': user_one.given_name,
'twitter': user_one.given_name,
'ssrn': user_one.given_name
}
user_one.save()
res = self.app.get(url, {'q': 'Umwali'})
assert_equal(res.status_code, 200)
assert_equal(len(res.json['results']), 1)
assert_not_in('Joan', res.body)
assert_true(res.json['results'][0]['social'])
assert_equal(res.json['results'][0]['names']['fullname'], user_one.fullname)
assert_equal(res.json['results'][0]['social']['github'], 'http://github.com/{}'.format(user_one.given_name))
assert_equal(res.json['results'][0]['social']['twitter'], 'http://twitter.com/{}'.format(user_one.given_name))
assert_equal(res.json['results'][0]['social']['ssrn'], 'http://papers.ssrn.com/sol3/cf_dev/AbsByAuth.cfm?per_id={}'.format(user_one.given_name))
user_two.social = {
'profileWebsites': ['http://me.com/{}'.format(user_two.given_name)],
'orcid': user_two.given_name,
'linkedIn': user_two.given_name,
'scholar': user_two.given_name,
'impactStory': user_two.given_name,
'baiduScholar': user_two.given_name
}
user_two.save()
user_three = factories.AuthUserFactory(fullname='Janet Umwali')
user_three.social = {
'github': user_three.given_name,
'ssrn': user_three.given_name
}
user_three.save()
res = self.app.get(url, {'q': 'Umwali'})
assert_equal(res.status_code, 200)
assert_equal(len(res.json['results']), 2)
assert_true(res.json['results'][0]['social'])
assert_true(res.json['results'][1]['social'])
assert_not_equal(res.json['results'][0]['social']['ssrn'], res.json['results'][1]['social']['ssrn'])
assert_not_equal(res.json['results'][0]['social']['github'], res.json['results'][1]['social']['github'])
res = self.app.get(url, {'q': 'Uwase'})
assert_equal(res.status_code, 200)
assert_equal(len(res.json['results']), 1)
assert_true(res.json['results'][0]['social'])
assert_not_in('ssrn', res.json['results'][0]['social'])
assert_equal(res.json['results'][0]['social']['profileWebsites'][0], 'http://me.com/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['impactStory'], 'https://impactstory.org/u/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['orcid'], 'http://orcid.org/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['baiduScholar'], 'http://xueshu.baidu.com/scholarID/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['linkedIn'], 'https://www.linkedin.com/{}'.format(user_two.given_name))
assert_equal(res.json['results'][0]['social']['scholar'], 'http://scholar.google.com/citations?user={}'.format(user_two.given_name))
@pytest.mark.enable_bookmark_creation
class TestODMTitleSearch(OsfTestCase):
""" Docs from original method:
:arg term: The substring of the title.
:arg category: Category of the node.
:arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg includePublic: yes or no. Whether the projects listed should include public projects.
:arg includeContributed: yes or no. Whether the search should include projects the current user has
contributed to.
:arg ignoreNode: a list of nodes that should not be included in the search.
:return: a list of dictionaries of projects
"""
def setUp(self):
super(TestODMTitleSearch, self).setUp()
self.user = factories.AuthUserFactory()
self.user_two = factories.AuthUserFactory()
self.project = factories.ProjectFactory(creator=self.user, title='foo')
self.project_two = factories.ProjectFactory(creator=self.user_two, title='bar')
self.public_project = factories.ProjectFactory(creator=self.user_two, is_public=True, title='baz')
self.registration_project = factories.RegistrationFactory(creator=self.user, title='qux')
self.folder = factories.CollectionFactory(creator=self.user, title='quux')
self.dashboard = find_bookmark_collection(self.user)
self.url = api_url_for('search_projects_by_title')
def test_search_projects_by_title(self):
res = self.app.get(self.url, {'term': self.project.title}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.public_project.title,
'includePublic': 'yes',
'includeContributed': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'no',
'includeContributed': 'yes'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'no',
'includeContributed': 'yes',
'isRegistration': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.public_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.registration_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'either'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 2)
res = self.app.get(self.url,
{
'term': self.registration_project.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isRegistration': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 1)
res = self.app.get(self.url,
{
'term': self.folder.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'yes'
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert len(res.json) == 0
res = self.app.get(self.url,
{
'term': self.folder.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 0)
res = self.app.get(self.url,
{
'term': self.dashboard.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'no'
}, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 0)
res = self.app.get(self.url,
{
'term': self.dashboard.title,
'includePublic': 'yes',
'includeContributed': 'yes',
'isFolder': 'yes'
}, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), 0)
| apache-2.0 |
dannyperry571/theapprentice | script.module.beautifulsoup4/lib/bs4/tests/test_soup.py | 272 | 17391 | # -*- coding: utf-8 -*-
"""Tests of Beautiful Soup as a whole."""
import logging
import unittest
import sys
import tempfile
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
SoupStrainer,
NamespacedAttribute,
)
import bs4.dammit
from bs4.dammit import (
EntitySubstitution,
UnicodeDammit,
)
from bs4.testing import (
SoupTest,
skipIf,
)
import warnings
try:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
LXML_PRESENT = True
except ImportError, e:
LXML_PRESENT = False
PYTHON_2_PRE_2_7 = (sys.version_info < (2,7))
PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3,2))
class TestConstructor(SoupTest):
def test_short_unicode_input(self):
data = u"<h1>éé</h1>"
soup = self.soup(data)
self.assertEqual(u"éé", soup.h1.string)
def test_embedded_null(self):
data = u"<h1>foo\0bar</h1>"
soup = self.soup(data)
self.assertEqual(u"foo\0bar", soup.h1.string)
class TestDeprecatedConstructorArguments(SoupTest):
def test_parseOnlyThese_renamed_to_parse_only(self):
with warnings.catch_warnings(record=True) as w:
soup = self.soup("<a><b></b></a>", parseOnlyThese=SoupStrainer("b"))
msg = str(w[0].message)
self.assertTrue("parseOnlyThese" in msg)
self.assertTrue("parse_only" in msg)
self.assertEqual(b"<b></b>", soup.encode())
def test_fromEncoding_renamed_to_from_encoding(self):
with warnings.catch_warnings(record=True) as w:
utf8 = b"\xc3\xa9"
soup = self.soup(utf8, fromEncoding="utf8")
msg = str(w[0].message)
self.assertTrue("fromEncoding" in msg)
self.assertTrue("from_encoding" in msg)
self.assertEqual("utf8", soup.original_encoding)
def test_unrecognized_keyword_argument(self):
self.assertRaises(
TypeError, self.soup, "<a>", no_such_argument=True)
class TestWarnings(SoupTest):
def test_disk_file_warning(self):
filehandle = tempfile.NamedTemporaryFile()
filename = filehandle.name
try:
with warnings.catch_warnings(record=True) as w:
soup = self.soup(filename)
msg = str(w[0].message)
self.assertTrue("looks like a filename" in msg)
finally:
filehandle.close()
# The file no longer exists, so Beautiful Soup will no longer issue the warning.
with warnings.catch_warnings(record=True) as w:
soup = self.soup(filename)
self.assertEqual(0, len(w))
def test_url_warning(self):
with warnings.catch_warnings(record=True) as w:
soup = self.soup("http://www.crummy.com/")
msg = str(w[0].message)
self.assertTrue("looks like a URL" in msg)
with warnings.catch_warnings(record=True) as w:
soup = self.soup("http://www.crummy.com/ is great")
self.assertEqual(0, len(w))
class TestSelectiveParsing(SoupTest):
def test_parse_with_soupstrainer(self):
markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
strainer = SoupStrainer("b")
soup = self.soup(markup, parse_only=strainer)
self.assertEqual(soup.encode(), b"<b>Yes</b><b>Yes <c>Yes</c></b>")
class TestEntitySubstitution(unittest.TestCase):
"""Standalone tests of the EntitySubstitution class."""
def setUp(self):
self.sub = EntitySubstitution
def test_simple_html_substitution(self):
        # Unicode characters corresponding to named HTML entities
# are substituted, and no others.
s = u"foo\u2200\N{SNOWMAN}\u00f5bar"
self.assertEqual(self.sub.substitute_html(s),
u"foo∀\N{SNOWMAN}õbar")
def test_smart_quote_substitution(self):
# MS smart quotes are a common source of frustration, so we
# give them a special test.
quotes = b"\x91\x92foo\x93\x94"
dammit = UnicodeDammit(quotes)
self.assertEqual(self.sub.substitute_html(dammit.markup),
"‘’foo“”")
    def test_xml_conversion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
s = 'Welcome to "my bar"'
self.assertEqual(self.sub.substitute_xml(s, False), s)
def test_xml_attribute_quoting_normally_uses_double_quotes(self):
self.assertEqual(self.sub.substitute_xml("Welcome", True),
'"Welcome"')
self.assertEqual(self.sub.substitute_xml("Bob's Bar", True),
'"Bob\'s Bar"')
def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
s = 'Welcome to "my bar"'
self.assertEqual(self.sub.substitute_xml(s, True),
"'Welcome to \"my bar\"'")
def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
s = 'Welcome to "Bob\'s Bar"'
self.assertEqual(
self.sub.substitute_xml(s, True),
'"Welcome to "Bob\'s Bar""')
def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
quoted = 'Welcome to "Bob\'s Bar"'
self.assertEqual(self.sub.substitute_xml(quoted), quoted)
def test_xml_quoting_handles_angle_brackets(self):
self.assertEqual(
self.sub.substitute_xml("foo<bar>"),
"foo<bar>")
def test_xml_quoting_handles_ampersands(self):
self.assertEqual(self.sub.substitute_xml("AT&T"), "AT&T")
def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self):
self.assertEqual(
self.sub.substitute_xml("ÁT&T"),
"&Aacute;T&T")
def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self):
self.assertEqual(
self.sub.substitute_xml_containing_entities("ÁT&T"),
"ÁT&T")
def test_quotes_not_html_substituted(self):
"""There's no need to do this except inside attribute values."""
text = 'Bob\'s "bar"'
self.assertEqual(self.sub.substitute_html(text), text)
class TestEncodingConversion(SoupTest):
# Test Beautiful Soup's ability to decode and encode from various
# encodings.
def setUp(self):
super(TestEncodingConversion, self).setUp()
self.unicode_data = u'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>'
self.utf8_data = self.unicode_data.encode("utf-8")
# Just so you know what it looks like.
self.assertEqual(
self.utf8_data,
b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>')
def test_ascii_in_unicode_out(self):
# ASCII input is converted to Unicode. The original_encoding
# attribute is set to 'utf-8', a superset of ASCII.
chardet = bs4.dammit.chardet_dammit
logging.disable(logging.WARNING)
try:
def noop(str):
return None
# Disable chardet, which will realize that the ASCII is ASCII.
bs4.dammit.chardet_dammit = noop
ascii = b"<foo>a</foo>"
soup_from_ascii = self.soup(ascii)
unicode_output = soup_from_ascii.decode()
self.assertTrue(isinstance(unicode_output, unicode))
self.assertEqual(unicode_output, self.document_for(ascii.decode()))
self.assertEqual(soup_from_ascii.original_encoding.lower(), "utf-8")
finally:
logging.disable(logging.NOTSET)
bs4.dammit.chardet_dammit = chardet
def test_unicode_in_unicode_out(self):
# Unicode input is left alone. The original_encoding attribute
# is not set.
soup_from_unicode = self.soup(self.unicode_data)
self.assertEqual(soup_from_unicode.decode(), self.unicode_data)
self.assertEqual(soup_from_unicode.foo.string, u'Sacr\xe9 bleu!')
self.assertEqual(soup_from_unicode.original_encoding, None)
def test_utf8_in_unicode_out(self):
# UTF-8 input is converted to Unicode. The original_encoding
# attribute is set.
soup_from_utf8 = self.soup(self.utf8_data)
self.assertEqual(soup_from_utf8.decode(), self.unicode_data)
self.assertEqual(soup_from_utf8.foo.string, u'Sacr\xe9 bleu!')
def test_utf8_out(self):
# The internal data structures can be encoded as UTF-8.
soup_from_unicode = self.soup(self.unicode_data)
self.assertEqual(soup_from_unicode.encode('utf-8'), self.utf8_data)
@skipIf(
PYTHON_2_PRE_2_7 or PYTHON_3_PRE_3_2,
"Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.")
def test_attribute_name_containing_unicode_characters(self):
markup = u'<div><a \N{SNOWMAN}="snowman"></a></div>'
self.assertEqual(self.soup(markup).div.encode("utf8"), markup.encode("utf8"))
class TestUnicodeDammit(unittest.TestCase):
"""Standalone tests of UnicodeDammit."""
def test_unicode_input(self):
markup = u"I'm already Unicode! \N{SNOWMAN}"
dammit = UnicodeDammit(markup)
self.assertEqual(dammit.unicode_markup, markup)
def test_smart_quotes_to_unicode(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup)
self.assertEqual(
dammit.unicode_markup, u"<foo>\u2018\u2019\u201c\u201d</foo>")
def test_smart_quotes_to_xml_entities(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="xml")
self.assertEqual(
dammit.unicode_markup, "<foo>‘’“”</foo>")
def test_smart_quotes_to_html_entities(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="html")
self.assertEqual(
dammit.unicode_markup, "<foo>‘’“”</foo>")
def test_smart_quotes_to_ascii(self):
markup = b"<foo>\x91\x92\x93\x94</foo>"
dammit = UnicodeDammit(markup, smart_quotes_to="ascii")
self.assertEqual(
dammit.unicode_markup, """<foo>''""</foo>""")
def test_detect_utf8(self):
utf8 = b"\xc3\xa9"
dammit = UnicodeDammit(utf8)
self.assertEqual(dammit.unicode_markup, u'\xe9')
self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
def test_convert_hebrew(self):
hebrew = b"\xed\xe5\xec\xf9"
dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
self.assertEqual(dammit.original_encoding.lower(), 'iso-8859-8')
self.assertEqual(dammit.unicode_markup, u'\u05dd\u05d5\u05dc\u05e9')
def test_dont_see_smart_quotes_where_there_are_none(self):
utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
dammit = UnicodeDammit(utf_8)
self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
self.assertEqual(dammit.unicode_markup.encode("utf-8"), utf_8)
def test_ignore_inappropriate_codecs(self):
utf8_data = u"Räksmörgås".encode("utf-8")
dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
def test_ignore_invalid_codecs(self):
utf8_data = u"Räksmörgås".encode("utf-8")
for bad_encoding in ['.utf8', '...', 'utF---16.!']:
dammit = UnicodeDammit(utf8_data, [bad_encoding])
self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
def test_detect_html5_style_meta_tag(self):
for data in (
b'<html><meta charset="euc-jp" /></html>',
b"<html><meta charset='euc-jp' /></html>",
b"<html><meta charset=euc-jp /></html>",
b"<html><meta charset=euc-jp/></html>"):
dammit = UnicodeDammit(data, is_html=True)
self.assertEqual(
"euc-jp", dammit.original_encoding)
def test_last_ditch_entity_replacement(self):
# This is a UTF-8 document that contains bytestrings
# completely incompatible with UTF-8 (ie. encoded with some other
# encoding).
#
# Since there is no consistent encoding for the document,
# Unicode, Dammit will eventually encode the document as UTF-8
# and encode the incompatible characters as REPLACEMENT
# CHARACTER.
#
# If chardet is installed, it will detect that the document
# can be converted into ISO-8859-1 without errors. This happens
# to be the wrong encoding, but it is a consistent encoding, so the
# code we're testing here won't run.
#
# So we temporarily disable chardet if it's present.
doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
chardet = bs4.dammit.chardet_dammit
logging.disable(logging.WARNING)
try:
def noop(str):
return None
bs4.dammit.chardet_dammit = noop
dammit = UnicodeDammit(doc)
self.assertEqual(True, dammit.contains_replacement_characters)
self.assertTrue(u"\ufffd" in dammit.unicode_markup)
soup = BeautifulSoup(doc, "html.parser")
self.assertTrue(soup.contains_replacement_characters)
finally:
logging.disable(logging.NOTSET)
bs4.dammit.chardet_dammit = chardet
def test_byte_order_mark_removed(self):
# A document written in UTF-16LE will have its byte order marker stripped.
data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
dammit = UnicodeDammit(data)
self.assertEqual(u"<a>áé</a>", dammit.unicode_markup)
self.assertEqual("utf-16le", dammit.original_encoding)
def test_detwingle(self):
# Here's a UTF8 document.
utf8 = (u"\N{SNOWMAN}" * 3).encode("utf8")
# Here's a Windows-1252 document.
windows_1252 = (
u"\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
u"\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")
# Through some unholy alchemy, they've been stuck together.
doc = utf8 + windows_1252 + utf8
# The document can't be turned into UTF-8:
self.assertRaises(UnicodeDecodeError, doc.decode, "utf8")
# Unicode, Dammit thinks the whole document is Windows-1252,
# and decodes it into "☃☃☃“Hi, I like Windows!”☃☃☃"
# But if we run it through fix_embedded_windows_1252, it's fixed:
fixed = UnicodeDammit.detwingle(doc)
self.assertEqual(
u"☃☃☃“Hi, I like Windows!”☃☃☃", fixed.decode("utf8"))
def test_detwingle_ignores_multibyte_characters(self):
# Each of these characters has a UTF-8 representation ending
# in \x93. \x93 is a smart quote if interpreted as
# Windows-1252. But our code knows to skip over multibyte
# UTF-8 characters, so they'll survive the process unscathed.
for tricky_unicode_char in (
u"\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
u"\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
u"\xf0\x90\x90\x93", # This is a CJK character, not sure which one.
):
input = tricky_unicode_char.encode("utf8")
self.assertTrue(input.endswith(b'\x93'))
output = UnicodeDammit.detwingle(input)
self.assertEqual(output, input)
class TestNamedspacedAttribute(SoupTest):
def test_name_may_be_none(self):
a = NamespacedAttribute("xmlns", None)
self.assertEqual(a, "xmlns")
def test_attribute_is_equivalent_to_colon_separated_string(self):
a = NamespacedAttribute("a", "b")
self.assertEqual("a:b", a)
def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
a = NamespacedAttribute("a", "b", "c")
b = NamespacedAttribute("a", "b", "c")
self.assertEqual(a, b)
# The actual namespace is not considered.
c = NamespacedAttribute("a", "b", None)
self.assertEqual(a, c)
# But name and prefix are important.
d = NamespacedAttribute("a", "z", "c")
self.assertNotEqual(a, d)
e = NamespacedAttribute("z", "b", "c")
self.assertNotEqual(a, e)
class TestAttributeValueWithCharsetSubstitution(unittest.TestCase):
    def test_charset_meta_attribute_value(self):
value = CharsetMetaAttributeValue("euc-jp")
self.assertEqual("euc-jp", value)
self.assertEqual("euc-jp", value.original_value)
self.assertEqual("utf8", value.encode("utf8"))
def test_content_meta_attribute_value(self):
value = ContentMetaAttributeValue("text/html; charset=euc-jp")
self.assertEqual("text/html; charset=euc-jp", value)
self.assertEqual("text/html; charset=euc-jp", value.original_value)
self.assertEqual("text/html; charset=utf8", value.encode("utf8"))
| gpl-2.0 |
quantwizard-com/pythonbacktest | pythonbacktest/visualization/abstractdatavisualization.py | 1 | 1666 | import abc
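# Base class that collects indicator history and trade-log transactions so a
# concrete visualization subclass can render them together.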
class AbstractDataVisualization(object):
def __init__(self):
self.__indicators_history = None
self.__indicators_name_collections = None
self.__recorded_transaction_names = []
self.__trade_transactions = {}
def add_indicators_history(self, indicators_history, *indicators_name_collections):
self.__indicators_history = indicators_history
self.__indicators_name_collections = indicators_name_collections
def add_transactions_from_trade_log(self, trade_log, *transaction_names):
self.__recorded_transaction_names = list(transaction_names)
for trade_record in trade_log.all_transactions:
transaction_type = trade_record.transaction_type
transaction_price_per_share = trade_record.transaction_price_per_share
price_bar_index = trade_record.price_bar_index_per_day
if transaction_type in transaction_names:
if transaction_type in self.__trade_transactions:
self.__trade_transactions[transaction_type].append((price_bar_index, transaction_price_per_share))
else:
self.__trade_transactions[transaction_type] = list([(price_bar_index, transaction_price_per_share)])
@property
def trade_transactions(self):
return self.__trade_transactions
@property
def recorded_transaction_names(self):
return self.__recorded_transaction_names
@property
def indicators_name_collections(self):
return self.__indicators_name_collections
@property
def indicators_history(self):
return self.__indicators_history
| apache-2.0 |
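A minimal usage sketch for the AbstractDataVisualization class in the pythonbacktest entry above. The TradeRecord/FakeTradeLog stand-ins are hypothetical, but the attribute and method names (all_transactions, transaction_type, transaction_price_per_share, price_bar_index_per_day) mirror the ones the class actually reads, and the import path follows the file path shown in that entry.

from collections import namedtuple

from pythonbacktest.visualization.abstractdatavisualization import AbstractDataVisualization

# Hypothetical stand-ins for the real trade-log objects.
TradeRecord = namedtuple('TradeRecord',
                         ['transaction_type', 'transaction_price_per_share', 'price_bar_index_per_day'])

class FakeTradeLog(object):
    def __init__(self, records):
        self.all_transactions = records

# The class declares no abstract methods, so it can be instantiated directly here.
viz = AbstractDataVisualization()
viz.add_transactions_from_trade_log(
    FakeTradeLog([TradeRecord('BUY', 101.5, 12), TradeRecord('SELL', 103.0, 47)]),
    'BUY', 'SELL')

print(viz.trade_transactions)          # {'BUY': [(12, 101.5)], 'SELL': [(47, 103.0)]}
print(viz.recorded_transaction_names)  # ['BUY', 'SELL']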
tdtrask/ansible | lib/ansible/plugins/lookup/csvfile.py | 22 | 4790 | # (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: csvfile
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
version_added: "1.5"
short_description: read data from a TSV or CSV file
description:
- The csvfile lookup reads the contents of a file in CSV (comma-separated value) format.
The lookup looks for the row where the first column matches keyname, and returns the value in the second column, unless a different column is specified.
options:
col:
description: column to return (0 index).
default: "1"
default:
description: what to return if the value is not found in the file.
default: ''
delimiter:
description: field separator in the file, for a tab you can specify "TAB" or "t".
default: TAB
file:
description: name of the CSV/TSV file to open.
default: ansible.csv
encoding:
description: Encoding (character set) of the used CSV file.
default: utf-8
version_added: "2.1"
notes:
      - The default is for TSV files (tab delimited) not CSV (comma delimited) ... yes the name is misleading.
"""
EXAMPLES = """
- name: Match 'Li' on the first column, return the second column (0 based index)
debug: msg="The atomic number of Lithium is {{ lookup('csvfile', 'Li file=elements.csv delimiter=,') }}"
- name: msg="Match 'Li' on the first column, but return the 3rd column (columns start counting after the match)"
debug: msg="The atomic mass of Lithium is {{ lookup('csvfile', 'Li file=elements.csv delimiter=, col=2') }}"
"""
RETURN = """
_raw:
description:
- value(s) stored in file column
"""
import codecs
import csv
from collections import MutableSequence
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils._text import to_bytes, to_native, to_text
class CSVRecoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding='utf-8'):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def next(self):
return self.reader.next().encode("utf-8")
class CSVReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
f = CSVRecoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = self.reader.next()
return [to_text(s) for s in row]
def __iter__(self):
return self
class LookupModule(LookupBase):
def read_csv(self, filename, key, delimiter, encoding='utf-8', dflt=None, col=1):
try:
f = open(filename, 'r')
creader = CSVReader(f, delimiter=to_bytes(delimiter), encoding=encoding)
for row in creader:
if row[0] == key:
return row[int(col)]
except Exception as e:
raise AnsibleError("csvfile: %s" % to_native(e))
return dflt
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
params = term.split()
key = params[0]
paramvals = {
'col': "1", # column to return
'default': None,
'delimiter': "TAB",
'file': 'ansible.csv',
'encoding': 'utf-8',
}
# parameters specified?
try:
for param in params[1:]:
name, value = param.split('=')
if name not in paramvals:
raise AnsibleAssertionError('%s not in paramvals' % name)
paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
if paramvals['delimiter'] == 'TAB':
paramvals['delimiter'] = "\t"
lookupfile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['encoding'], paramvals['default'], paramvals['col'])
if var is not None:
if isinstance(var, MutableSequence):
for v in var:
ret.append(v)
else:
ret.append(var)
return ret
| gpl-3.0 |
kbkpbot/K860i_kernel | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
petrus-v/odoo | addons/lunch/wizard/lunch_validation.py | 440 | 1296 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_validation(osv.Model):
""" lunch validation """
_name = 'lunch.validation'
_description = 'lunch validation for order'
def confirm(self,cr,uid,ids,context=None):
return self.pool.get('lunch.order.line').confirm(cr, uid, ids, context=context)
| agpl-3.0 |
rsheftel/pandas_market_calendars | tests/test_eurex_calendar.py | 1 | 1290 | import pandas as pd
import pytz
from pandas_market_calendars.exchange_calendar_eurex import EUREXExchangeCalendar
def test_time_zone():
assert EUREXExchangeCalendar().tz == pytz.timezone('Europe/Berlin')
assert EUREXExchangeCalendar().name == 'EUREX'
def test_2016_holidays():
# good friday: 2016-03-25
# May 1st: on a weekend, not rolled forward
# christmas: on a weekend, not rolled forward
# boxing day: 2016-12-26
# new years (observed): 2016-01-01
eurex = EUREXExchangeCalendar()
good_dates = eurex.valid_days('2016-01-01', '2016-12-31')
for date in ["2016-03-25", "2016-01-01", "2016-12-26"]:
assert pd.Timestamp(date, tz='UTC') not in good_dates
for date in ['2016-05-02']:
assert pd.Timestamp(date, tz='UTC') in good_dates
def test_2017_holidays():
# good friday: 2017-04-14
# May 1st: 2017-05-01
# christmas (observed): 2017-12-25
# new years (observed): on a weekend, not rolled forward
eurex = EUREXExchangeCalendar()
good_dates = eurex.valid_days('2017-01-01', '2017-12-31')
for date in ["2016-04-14", "2017-05-01", "2017-12-25"]:
assert pd.Timestamp(date, tz='UTC') not in good_dates
for date in ["2017-01-02"]:
assert pd.Timestamp(date, tz='UTC') in good_dates
| mit |
wimnat/ansible | lib/ansible/module_utils/facts/network/nvme.py | 92 | 1999 | # NVMe initiator related facts collection for Ansible.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import subprocess
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.network.base import NetworkCollector
class NvmeInitiatorNetworkCollector(NetworkCollector):
name = 'nvme'
_fact_ids = set()
def collect(self, module=None, collected_facts=None):
"""
Currently NVMe is only supported in some Linux distributions.
If NVMe is configured on the host then a file will have been created
during the NVMe driver installation. This file holds the unique NQN
of the host.
Example of contents of /etc/nvme/hostnqn:
# cat /etc/nvme/hostnqn
nqn.2014-08.org.nvmexpress:fc_lif:uuid:2cd61a74-17f9-4c22-b350-3020020c458d
"""
nvme_facts = {}
nvme_facts['hostnqn'] = ""
if sys.platform.startswith('linux'):
for line in get_file_content('/etc/nvme/hostnqn', '').splitlines():
if line.startswith('#') or line.startswith(';') or line.strip() == '':
continue
if line.startswith('nqn.'):
nvme_facts['hostnqn'] = line
break
return nvme_facts
| gpl-3.0 |
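The collector in the ansible entry above can also be exercised outside Ansible's normal fact-gathering flow; a minimal sketch, assuming a Linux host where /etc/nvme/hostnqn may or may not exist as described in the docstring.

from ansible.module_utils.facts.network.nvme import NvmeInitiatorNetworkCollector

facts = NvmeInitiatorNetworkCollector().collect()
# With the NVMe driver configured this yields something like
#   {'hostnqn': 'nqn.2014-08.org.nvmexpress:fc_lif:uuid:2cd61a74-17f9-4c22-b350-3020020c458d'}
# and {'hostnqn': ''} otherwise (or on non-Linux platforms).
print(facts)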
hfp/tensorflow-xsmm | tensorflow/python/kernel_tests/summary_v1_ops_test.py | 36 | 4072 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the actual serialized proto output of the V1 tf.summary ops.
The tensor, audio, and image ops have dedicated tests in adjacent files. The
overall tf.summary API surface also has its own tests in summary_test.py that
check calling the API methods but not the exact serialized proto output.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import logging_ops
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
class SummaryV1OpsTest(test.TestCase):
def _AsSummary(self, s):
summ = summary_pb2.Summary()
summ.ParseFromString(s)
return summ
def testScalarSummary(self):
with self.cached_session() as sess:
const = constant_op.constant([10.0, 20.0])
summ = logging_ops.scalar_summary(["c1", "c2"], const, name="mysumm")
value = self.evaluate(summ)
self.assertEqual([], summ.get_shape())
self.assertProtoEquals("""
value { tag: "c1" simple_value: 10.0 }
value { tag: "c2" simple_value: 20.0 }
""", self._AsSummary(value))
def testScalarSummaryDefaultName(self):
with self.cached_session() as sess:
const = constant_op.constant([10.0, 20.0])
summ = logging_ops.scalar_summary(["c1", "c2"], const)
value = self.evaluate(summ)
self.assertEqual([], summ.get_shape())
self.assertProtoEquals("""
value { tag: "c1" simple_value: 10.0 }
value { tag: "c2" simple_value: 20.0 }
""", self._AsSummary(value))
@test_util.run_deprecated_v1
def testMergeSummary(self):
with self.cached_session() as sess:
const = constant_op.constant(10.0)
summ1 = summary.histogram("h", const)
summ2 = logging_ops.scalar_summary("c", const)
merge = summary.merge([summ1, summ2])
value = self.evaluate(merge)
self.assertEqual([], merge.get_shape())
self.assertProtoEquals("""
value {
tag: "h"
histo {
min: 10.0
max: 10.0
num: 1.0
sum: 10.0
sum_squares: 100.0
bucket_limit: 9.93809490288
bucket_limit: 10.9319043932
bucket_limit: 1.7976931348623157e+308
bucket: 0.0
bucket: 1.0
bucket: 0.0
}
}
value { tag: "c" simple_value: 10.0 }
""", self._AsSummary(value))
def testMergeAllSummaries(self):
with ops.Graph().as_default():
const = constant_op.constant(10.0)
summ1 = summary.histogram("h", const)
summ2 = summary.scalar("o", const, collections=["foo_key"])
summ3 = summary.scalar("c", const)
merge = summary.merge_all()
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(2, len(merge.op.inputs))
self.assertEqual(summ1, merge.op.inputs[0])
self.assertEqual(summ3, merge.op.inputs[1])
merge = summary.merge_all("foo_key")
self.assertEqual("MergeSummary", merge.op.type)
self.assertEqual(1, len(merge.op.inputs))
self.assertEqual(summ2, merge.op.inputs[0])
self.assertTrue(summary.merge_all("bar_key") is None)
if __name__ == "__main__":
test.main()
| apache-2.0 |
wulin9005/cocosbuilder | CocosBuilder/libs/nodejs/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/common_test.py | 151 | 1920 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
def test_Valid(self):
"""Test that sorting works on a valid graph with one possible order."""
graph = {
'a': ['b', 'c'],
'b': [],
'c': ['d'],
'd': ['b'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertEqual(
gyp.common.TopologicallySorted(graph.keys(), GetEdge),
['a', 'c', 'd', 'b'])
def test_Cycle(self):
"""Test that an exception is thrown on a cyclic graph."""
graph = {
'a': ['b'],
'b': ['c'],
'c': ['d'],
'd': ['a'],
}
def GetEdge(node):
return tuple(graph[node])
self.assertRaises(
gyp.common.CycleError, gyp.common.TopologicallySorted,
graph.keys(), GetEdge)
class TestGetFlavor(unittest.TestCase):
"""Test that gyp.common.GetFlavor works as intended"""
original_platform = ''
def setUp(self):
self.original_platform = sys.platform
def tearDown(self):
sys.platform = self.original_platform
def assertFlavor(self, expected, argument, param):
sys.platform = argument
self.assertEqual(expected, gyp.common.GetFlavor(param))
def test_platform_default(self):
self.assertFlavor('freebsd', 'freebsd9' , {})
self.assertFlavor('freebsd', 'freebsd10', {})
    self.assertFlavor('solaris', 'sunos5' , {})
    self.assertFlavor('solaris', 'sunos' , {})
    self.assertFlavor('linux' , 'linux2' , {})
    self.assertFlavor('linux' , 'linux3' , {})
def test_param(self):
self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()
| mit |
CouchPotato/CouchPotatoV1 | cherrypy/_cpmodpy.py | 82 | 11201 | """Native adapter for serving CherryPy via mod_python
Basic usage:
##########################################
# Application in a module called myapp.py
##########################################
import cherrypy
class Root:
@cherrypy.expose
def index(self):
return 'Hi there, Ho there, Hey there'
# We will use this method from the mod_python configuration
# as the entry point to our application
def setup_server():
cherrypy.tree.mount(Root())
cherrypy.config.update({'environment': 'production',
'log.screen': False,
'show_tracebacks': False})
##########################################
# mod_python settings for apache2
# This should reside in your httpd.conf
# or a file that will be loaded at
# apache startup
##########################################
# Start
DocumentRoot "/"
Listen 8080
LoadModule python_module /usr/lib/apache2/modules/mod_python.so
<Location "/">
PythonPath "sys.path+['/path/to/my/application']"
SetHandler python-program
PythonHandler cherrypy._cpmodpy::handler
PythonOption cherrypy.setup myapp::setup_server
PythonDebug On
</Location>
# End
The actual path to your mod_python.so is dependent on your
environment. In this case we suppose a global mod_python
installation on a Linux distribution such as Ubuntu.
We do set the PythonPath configuration setting so that
your application can be found by from the user running
the apache2 instance. Of course if your application
resides in the global site-package this won't be needed.
Then restart apache2 and access http://127.0.0.1:8080
"""
import logging
import sys
import cherrypy
from cherrypy._cpcompat import BytesIO, copyitems, ntob
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
# ------------------------------ Request-handling
def setup(req):
from mod_python import apache
# Run any setup functions defined by a "PythonOption cherrypy.setup" directive.
options = req.get_options()
if 'cherrypy.setup' in options:
for function in options['cherrypy.setup'].split():
atoms = function.split('::', 1)
if len(atoms) == 1:
mod = __import__(atoms[0], globals(), locals())
else:
modname, fname = atoms
mod = __import__(modname, globals(), locals(), [fname])
func = getattr(mod, fname)
func()
cherrypy.config.update({'log.screen': False,
"tools.ignore_headers.on": True,
"tools.ignore_headers.headers": ['Range'],
})
engine = cherrypy.engine
if hasattr(engine, "signal_handler"):
engine.signal_handler.unsubscribe()
if hasattr(engine, "console_control_handler"):
engine.console_control_handler.unsubscribe()
engine.autoreload.unsubscribe()
cherrypy.server.unsubscribe()
def _log(msg, level):
newlevel = apache.APLOG_ERR
if logging.DEBUG >= level:
newlevel = apache.APLOG_DEBUG
elif logging.INFO >= level:
newlevel = apache.APLOG_INFO
elif logging.WARNING >= level:
newlevel = apache.APLOG_WARNING
# On Windows, req.server is required or the msg will vanish. See
# http://www.modpython.org/pipermail/mod_python/2003-October/014291.html.
# Also, "When server is not specified...LogLevel does not apply..."
apache.log_error(msg, newlevel, req.server)
engine.subscribe('log', _log)
engine.start()
def cherrypy_cleanup(data):
engine.exit()
try:
# apache.register_cleanup wasn't available until 3.1.4.
apache.register_cleanup(cherrypy_cleanup)
except AttributeError:
req.server.register_cleanup(req, cherrypy_cleanup)
class _ReadOnlyRequest:
expose = ('read', 'readline', 'readlines')
def __init__(self, req):
for method in self.expose:
self.__dict__[method] = getattr(req, method)
recursive = False
_isSetUp = False
def handler(req):
from mod_python import apache
try:
global _isSetUp
if not _isSetUp:
setup(req)
_isSetUp = True
# Obtain a Request object from CherryPy
local = req.connection.local_addr
local = httputil.Host(local[0], local[1], req.connection.local_host or "")
remote = req.connection.remote_addr
remote = httputil.Host(remote[0], remote[1], req.connection.remote_host or "")
scheme = req.parsed_uri[0] or 'http'
req.get_basic_auth_pw()
try:
# apache.mpm_query only became available in mod_python 3.1
q = apache.mpm_query
threaded = q(apache.AP_MPMQ_IS_THREADED)
forked = q(apache.AP_MPMQ_IS_FORKED)
except AttributeError:
bad_value = ("You must provide a PythonOption '%s', "
"either 'on' or 'off', when running a version "
"of mod_python < 3.1")
            # mod_python < 3.1: fall back to the multithread/multiprocess PythonOptions.
            options = req.get_options()
            threaded = options.get('multithread', '').lower()
if threaded == 'on':
threaded = True
elif threaded == 'off':
threaded = False
else:
raise ValueError(bad_value % "multithread")
forked = options.get('multiprocess', '').lower()
if forked == 'on':
forked = True
elif forked == 'off':
forked = False
else:
raise ValueError(bad_value % "multiprocess")
sn = cherrypy.tree.script_name(req.uri or "/")
if sn is None:
send_response(req, '404 Not Found', [], '')
else:
app = cherrypy.tree.apps[sn]
method = req.method
path = req.uri
qs = req.args or ""
reqproto = req.protocol
headers = copyitems(req.headers_in)
rfile = _ReadOnlyRequest(req)
prev = None
try:
redirections = []
while True:
request, response = app.get_serving(local, remote, scheme,
"HTTP/1.1")
request.login = req.user
request.multithread = bool(threaded)
request.multiprocess = bool(forked)
request.app = app
request.prev = prev
# Run the CherryPy Request object and obtain the response
try:
request.run(method, path, qs, reqproto, headers, rfile)
break
except cherrypy.InternalRedirect:
ir = sys.exc_info()[1]
app.release_serving()
prev = request
if not recursive:
if ir.path in redirections:
raise RuntimeError("InternalRedirector visited the "
"same URL twice: %r" % ir.path)
else:
# Add the *previous* path_info + qs to redirections.
if qs:
qs = "?" + qs
redirections.append(sn + path + qs)
# Munge environment and try again.
method = "GET"
path = ir.path
qs = ir.query_string
rfile = BytesIO()
send_response(req, response.output_status, response.header_list,
response.body, response.stream)
finally:
app.release_serving()
except:
tb = format_exc()
cherrypy.log(tb, 'MOD_PYTHON', severity=logging.ERROR)
s, h, b = bare_error()
send_response(req, s, h, b)
return apache.OK
def send_response(req, status, headers, body, stream=False):
# Set response status
req.status = int(status[:3])
# Set response headers
req.content_type = "text/plain"
for header, value in headers:
if header.lower() == 'content-type':
req.content_type = value
continue
req.headers_out.add(header, value)
if stream:
# Flush now so the status and headers are sent immediately.
req.flush()
# Set response body
if isinstance(body, basestring):
req.write(body)
else:
for seg in body:
req.write(seg)
# --------------- Startup tools for CherryPy + mod_python --------------- #
import os
import re
try:
import subprocess
def popen(fullcmd):
p = subprocess.Popen(fullcmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
close_fds=True)
return p.stdout
except ImportError:
def popen(fullcmd):
pipein, pipeout = os.popen4(fullcmd)
return pipeout
def read_process(cmd, args=""):
fullcmd = "%s %s" % (cmd, args)
pipeout = popen(fullcmd)
try:
firstline = pipeout.readline()
if (re.search(ntob("(not recognized|No such file|not found)"), firstline,
re.IGNORECASE)):
raise IOError('%s must be on your system path.' % cmd)
output = firstline + pipeout.read()
finally:
pipeout.close()
return output
class ModPythonServer(object):
template = """
# Apache2 server configuration file for running CherryPy with mod_python.
DocumentRoot "/"
Listen %(port)s
LoadModule python_module modules/mod_python.so
<Location %(loc)s>
SetHandler python-program
PythonHandler %(handler)s
PythonDebug On
%(opts)s
</Location>
"""
def __init__(self, loc="/", port=80, opts=None, apache_path="apache",
handler="cherrypy._cpmodpy::handler"):
self.loc = loc
self.port = port
self.opts = opts
self.apache_path = apache_path
self.handler = handler
def start(self):
opts = "".join([" PythonOption %s %s\n" % (k, v)
for k, v in self.opts])
conf_data = self.template % {"port": self.port,
"loc": self.loc,
"opts": opts,
"handler": self.handler,
}
mpconf = os.path.join(os.path.dirname(__file__), "cpmodpy.conf")
f = open(mpconf, 'wb')
try:
f.write(conf_data)
finally:
f.close()
response = read_process(self.apache_path, "-k start -f %s" % mpconf)
self.ready = True
return response
def stop(self):
os.popen("apache -k stop")
self.ready = False
| gpl-3.0 |
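The ModPythonServer helper at the end of the CherryPy entry above offers a programmatic alternative to hand-writing the httpd.conf shown in the module docstring; a minimal sketch, assuming the myapp::setup_server entry point from that docstring exists and the Apache binary is available as apache2.

from cherrypy._cpmodpy import ModPythonServer

# Each (key, value) pair in opts becomes a "PythonOption key value" line in the generated config.
server = ModPythonServer(
    loc="/",
    port=8080,
    opts=[("cherrypy.setup", "myapp::setup_server")],
    apache_path="apache2",
)
print(server.start())  # writes cpmodpy.conf next to _cpmodpy.py, then runs "apache2 -k start -f <conf>"
# ... requests served on http://127.0.0.1:8080 ...
server.stop()          # note: stop() shells out to "apache -k stop" regardless of apache_path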
kennedyshead/home-assistant | tests/components/gree/test_switch.py | 2 | 3275 | """Tests for gree component."""
from greeclimate.exceptions import DeviceTimeoutError
from homeassistant.components.gree.const import DOMAIN as GREE_DOMAIN
from homeassistant.components.switch import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
ENTITY_ID = f"{DOMAIN}.fake_device_1_panel_light"
async def async_setup_gree(hass):
"""Set up the gree switch platform."""
MockConfigEntry(domain=GREE_DOMAIN).add_to_hass(hass)
await async_setup_component(hass, GREE_DOMAIN, {GREE_DOMAIN: {DOMAIN: {}}})
await hass.async_block_till_done()
async def test_send_panel_light_on(hass):
"""Test for sending power on command to the device."""
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_ID},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == STATE_ON
async def test_send_panel_light_on_device_timeout(hass, device):
"""Test for sending power on command to the device with a device timeout."""
device().push_state_update.side_effect = DeviceTimeoutError
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_ID},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == STATE_ON
async def test_send_panel_light_off(hass):
"""Test for sending power on command to the device."""
await async_setup_gree(hass)
assert await hass.services.async_call(
DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_ID},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == STATE_OFF
async def test_send_panel_light_toggle(hass):
"""Test for sending power on command to the device."""
await async_setup_gree(hass)
# Turn the service on first
assert await hass.services.async_call(
DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_ID},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == STATE_ON
# Toggle it off
assert await hass.services.async_call(
DOMAIN,
SERVICE_TOGGLE,
{ATTR_ENTITY_ID: ENTITY_ID},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == STATE_OFF
    # Toggle it back on
assert await hass.services.async_call(
DOMAIN,
SERVICE_TOGGLE,
{ATTR_ENTITY_ID: ENTITY_ID},
blocking=True,
)
state = hass.states.get(ENTITY_ID)
assert state is not None
assert state.state == STATE_ON
async def test_panel_light_name(hass):
"""Test for name property."""
await async_setup_gree(hass)
state = hass.states.get(ENTITY_ID)
assert state.attributes[ATTR_FRIENDLY_NAME] == "fake-device-1 Panel Light"
| apache-2.0 |