repo_name (stringlengths 6–100) | path (stringlengths 4–294) | copies (stringlengths 1–5) | size (stringlengths 4–6) | content (stringlengths 606–896k) | license (stringclasses, 15 values)
---|---|---|---|---|---
dabiboo/youtube-dl | youtube_dl/extractor/trutube.py | 147 | 1354 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import xpath_text
class TruTubeIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?trutube\.tv/(?:video/|nuevo/player/embed\.php\?v=)(?P<id>[0-9]+)'
_TESTS = [{
'url': 'http://trutube.tv/video/14880/Ramses-II-Proven-To-Be-A-Red-Headed-Caucasoid-',
'md5': 'c5b6e301b0a2040b074746cbeaa26ca1',
'info_dict': {
'id': '14880',
'ext': 'flv',
'title': 'Ramses II - Proven To Be A Red Headed Caucasoid',
'thumbnail': r're:^http:.*\.jpg$',
}
}, {
'url': 'https://trutube.tv/nuevo/player/embed.php?v=14880',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
config = self._download_xml(
'https://trutube.tv/nuevo/player/config.php?v=%s' % video_id,
video_id, transform_source=lambda s: s.strip())
# filehd is always 404
video_url = xpath_text(config, './file', 'video URL', fatal=True)
title = xpath_text(config, './title', 'title').strip()
thumbnail = xpath_text(config, './image', 'thumbnail')
return {
'id': video_id,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
}
| unlicense |
SebastienBocquet/ConvertibleUAV | Tools/MAVLink/mavlink/pymavlink/mavwp.py | 5 | 13222 | '''
module for loading/saving waypoints
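
Example (illustrative sketch; assumes a mission file "mission.txt" in
"QGC WPL 110" format exists on disk):

    loader = MAVWPLoader(target_system=1, target_component=1)
    n = loader.load('mission.txt')   # returns the number of waypoints loaded
    loader.save('mission_copy.txt')  # writes the list back in QGC WPL 110 format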
'''
import time, copy
import logging
import mavutil
try:
from google.protobuf import text_format
import mission_pb2
HAVE_PROTOBUF = True
except ImportError:
HAVE_PROTOBUF = False
class MAVWPError(Exception):
'''MAVLink WP error class'''
def __init__(self, msg):
Exception.__init__(self, msg)
self.message = msg
class MAVWPLoader(object):
'''MAVLink waypoint loader'''
def __init__(self, target_system=0, target_component=0):
self.wpoints = []
self.target_system = target_system
self.target_component = target_component
self.last_change = time.time()
def count(self):
'''return number of waypoints'''
return len(self.wpoints)
def wp(self, i):
'''return a waypoint'''
return self.wpoints[i]
def add(self, w, comment=''):
'''add a waypoint'''
w = copy.copy(w)
if comment:
w.comment = comment
w.seq = self.count()
self.wpoints.append(w)
self.last_change = time.time()
def set(self, w, idx):
'''set a waypoint'''
w.seq = idx
if w.seq == self.count():
return self.add(w)
if self.count() <= idx:
raise MAVWPError('adding waypoint at idx=%u past end of list (count=%u)' % (idx, self.count()))
self.wpoints[idx] = w
self.last_change = time.time()
def remove(self, w):
'''remove a waypoint'''
self.wpoints.remove(w)
self.last_change = time.time()
def clear(self):
'''clear waypoint list'''
self.wpoints = []
self.last_change = time.time()
def _read_waypoints_v100(self, file):
'''read a version 100 waypoint'''
cmdmap = {
2 : mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,
3 : mavutil.mavlink.MAV_CMD_NAV_RETURN_TO_LAUNCH,
4 : mavutil.mavlink.MAV_CMD_NAV_LAND,
24: mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,
26: mavutil.mavlink.MAV_CMD_NAV_LAND,
25: mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
27: mavutil.mavlink.MAV_CMD_NAV_LOITER_UNLIM
}
comment = ''
for line in file:
if line.startswith('#'):
comment = line[1:].lstrip()
continue
line = line.strip()
if not line:
continue
a = line.split()
if len(a) != 13:
raise MAVWPError("invalid waypoint line with %u values" % len(a))
if mavutil.mavlink10():
fn = mavutil.mavlink.MAVLink_mission_item_message
else:
fn = mavutil.mavlink.MAVLink_waypoint_message
w = fn(self.target_system, self.target_component,
int(a[0]), # seq
int(a[1]), # frame
int(a[2]), # action
int(a[7]), # current
int(a[12]), # autocontinue
float(a[5]), # param1,
float(a[6]), # param2,
float(a[3]), # param3
float(a[4]), # param4
float(a[9]), # x, latitude
float(a[8]), # y, longitude
float(a[10]) # z
)
if w.command not in cmdmap:
raise MAVWPError("Unknown v100 waypoint action %u" % w.command)
w.command = cmdmap[w.command]
self.add(w, comment)
comment = ''
def _read_waypoints_v110(self, file):
'''read a version 110 waypoint'''
comment = ''
for line in file:
if line.startswith('#'):
comment = line[1:].lstrip()
continue
line = line.strip()
if not line:
continue
a = line.split()
if len(a) != 12:
raise MAVWPError("invalid waypoint line with %u values" % len(a))
if mavutil.mavlink10():
fn = mavutil.mavlink.MAVLink_mission_item_message
else:
fn = mavutil.mavlink.MAVLink_waypoint_message
w = fn(self.target_system, self.target_component,
int(a[0]), # seq
int(a[2]), # frame
int(a[3]), # command
int(a[1]), # current
int(a[11]), # autocontinue
float(a[4]), # param1,
float(a[5]), # param2,
float(a[6]), # param3
float(a[7]), # param4
float(a[8]), # x (latitude)
float(a[9]), # y (longitude)
float(a[10]) # z (altitude)
)
if w.command == 0 and w.seq == 0 and self.count() == 0:
# special handling for Mission Planner created home wp
w.command = mavutil.mavlink.MAV_CMD_NAV_WAYPOINT
self.add(w, comment)
comment = ''
def _read_waypoints_pb_110(self, file):
if not HAVE_PROTOBUF:
raise MAVWPError(
'Cannot read mission file in protobuf format without protobuf '
'library. Try "easy_install protobuf".')
explicit_seq = False
warned_seq = False
mission = mission_pb2.Mission()
text_format.Merge(file.read(), mission)
defaults = mission_pb2.Waypoint()
# Set defaults (may be overridden in file).
defaults.current = False
defaults.autocontinue = True
defaults.param1 = 0.0
defaults.param2 = 0.0
defaults.param3 = 0.0
defaults.param4 = 0.0
defaults.x = 0.0
defaults.y = 0.0
defaults.z = 0.0
# Use defaults specified in mission file, if there are any.
if mission.defaults:
defaults.MergeFrom(mission.defaults)
for seq, waypoint in enumerate(mission.waypoint):
# Consecutive sequence numbers are automatically assigned
# UNLESS the mission file specifies sequence numbers of
# its own.
if waypoint.seq:
explicit_seq = True
else:
if explicit_seq and not warned_seq:
logging.warning(
'Waypoint file %s: mixes explicit and implicit '
'sequence numbers' % (file,))
warned_seq = True
# The first command has current=True, the rest have current=False.
if seq > 0:
current = defaults.current
else:
current = True
w = mavutil.mavlink.MAVLink_mission_item_message(
self.target_system, self.target_component,
waypoint.seq or seq,
waypoint.frame,
waypoint.command,
waypoint.current or current,
waypoint.autocontinue or defaults.autocontinue,
waypoint.param1 or defaults.param1,
waypoint.param2 or defaults.param2,
waypoint.param3 or defaults.param3,
waypoint.param4 or defaults.param4,
waypoint.x or defaults.x,
waypoint.y or defaults.y,
waypoint.z or defaults.z)
self.add(w)
def load(self, filename):
'''load waypoints from a file.
returns number of waypoints loaded'''
f = open(filename, mode='r')
version_line = f.readline().strip()
if version_line == "QGC WPL 100":
readfn = self._read_waypoints_v100
elif version_line == "QGC WPL 110":
readfn = self._read_waypoints_v110
elif version_line == "QGC WPL PB 110":
readfn = self._read_waypoints_pb_110
else:
f.close()
raise MAVWPError("Unsupported waypoint format '%s'" % version_line)
self.clear()
readfn(f)
f.close()
return len(self.wpoints)
def save_as_pb(self, filename):
mission = mission_pb2.Mission()
for w in self.wpoints:
waypoint = mission.waypoint.add()
waypoint.command = w.command
waypoint.frame = w.frame
waypoint.seq = w.seq
waypoint.current = w.current
waypoint.autocontinue = w.autocontinue
waypoint.param1 = w.param1
waypoint.param2 = w.param2
waypoint.param3 = w.param3
waypoint.param4 = w.param4
waypoint.x = w.x
waypoint.y = w.y
waypoint.z = w.z
with open(filename, 'w') as f:
f.write('QGC WPL PB 110\n')
f.write(text_format.MessageToString(mission))
def save(self, filename):
'''save waypoints to a file'''
f = open(filename, mode='w')
f.write("QGC WPL 110\n")
for w in self.wpoints:
if getattr(w, 'comment', None):
f.write("# %s\n" % w.comment)
f.write("%u\t%u\t%u\t%u\t%f\t%f\t%f\t%f\t%f\t%f\t%f\t%u\n" % (
w.seq, w.current, w.frame, w.command,
w.param1, w.param2, w.param3, w.param4,
w.x, w.y, w.z, w.autocontinue))
f.close()
def polygon(self, done=None):
'''return a polygon for the waypoints'''
points = []
if done is None:
done = set()
idx = 0
# find first point not done yet
while idx < self.count():
if idx not in done:
break
idx += 1
while idx < self.count():
w = self.wp(idx)
if idx in done:
if w.x != 0 or w.y != 0:
points.append((w.x, w.y))
break
done.add(idx)
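# follow DO_JUMP commands so the polygon traces the actual flight path;
# the 'done' set prevents looping forever over the same jump target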
if w.command == mavutil.mavlink.MAV_CMD_DO_JUMP:
idx = int(w.param1)
w = self.wp(idx)
if w.x != 0 or w.y != 0:
points.append((w.x, w.y))
continue
idx += 1
if (w.x != 0 or w.y != 0) and w.command in [mavutil.mavlink.MAV_CMD_NAV_WAYPOINT,
mavutil.mavlink.MAV_CMD_NAV_LOITER_UNLIM,
mavutil.mavlink.MAV_CMD_NAV_LOITER_TURNS,
mavutil.mavlink.MAV_CMD_NAV_LOITER_TIME,
mavutil.mavlink.MAV_CMD_NAV_LAND,
mavutil.mavlink.MAV_CMD_NAV_TAKEOFF]:
points.append((w.x, w.y))
return points
def polygon_list(self):
'''return a list of polygons for the waypoints'''
done = set()
ret = []
while len(done) != self.count():
p = self.polygon(done)
if len(p) > 0:
ret.append(p)
return ret
class MAVFenceError(Exception):
'''MAVLink fence error class'''
def __init__(self, msg):
Exception.__init__(self, msg)
self.message = msg
class MAVFenceLoader(object):
'''MAVLink geo-fence loader'''
def __init__(self, target_system=0, target_component=0):
self.points = []
self.target_system = target_system
self.target_component = target_component
self.last_change = time.time()
def count(self):
'''return number of points'''
return len(self.points)
def point(self, i):
'''return a point'''
return self.points[i]
def add(self, p):
'''add a point'''
self.points.append(p)
self.last_change = time.time()
def clear(self):
'''clear point list'''
self.points = []
self.last_change = time.time()
def load(self, filename):
'''load points from a file.
returns number of points loaded'''
f = open(filename, mode='r')
self.clear()
for line in f:
if line.startswith('#'):
continue
line = line.strip()
if not line:
continue
a = line.split()
if len(a) != 2:
raise MAVFenceError("invalid fence point line: %s" % line)
p = mavutil.mavlink.MAVLink_fence_point_message(self.target_system, self.target_component,
self.count(), 0, float(a[0]), float(a[1]))
self.add(p)
f.close()
for i in range(self.count()):
self.points[i].count = self.count()
return len(self.points)
def save(self, filename):
'''save fence points to a file'''
f = open(filename, mode='w')
for p in self.points:
f.write("%f\t%f\n" % (p.lat, p.lng))
f.close()
def polygon(self):
'''return a polygon for the fence'''
points = []
for fp in self.points[1:]:
points.append((fp.lat, fp.lng))
return points
| gpl-3.0 |
Beyond-Imagination/BlubBlub | ChatbotServer/ChatbotEnv/Lib/site-packages/konlpy/corpus.py | 1 | 1849 | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import os
from . import utils
class CorpusLoader():
"""Loader for corpora.
For a complete list of corpora available in KoNLPy,
refer to :ref:`corpora`.
.. code-block:: python
>>> from konlpy.corpus import kolaw
>>> fids = kolaw.fileids()
>>> fobj = kolaw.open(fids[0])
>>> print fobj.read(140)
대한민국헌법

유구한 역사와 전통에 빛나는 우리 대한국민은 3·1운동으로 건립된 대한민국임시정부의 법통과 불의에 항거한
4·19민주이념을 계승하고, 조국의 민주개혁과 평화적 통일의 사명에 입각하여 정의·인도와 동포애로써 민족의
단결을 공고히 하고, 모든 사회적 폐습과 불의를 타파하며, 자율과 조화를 바탕
"""
def abspath(self, filename=None):
"""Absolute path of corpus file.
If ``filename`` is *None*, returns absolute path of corpus.
:param filename: Name of a particular file in the corpus.
"""
basedir = '%s/data/corpus/%s' % (utils.installpath, self.name)
if filename:
return '%s/%s' % (basedir, filename)
else:
return '%s/' % basedir
def fileids(self):
"""List of file IDs in the corpus."""
return os.listdir(self.abspath())
def open(self, filename):
"""Method to open a file in the corpus.
Returns a file object.
:param filename: Name of a particular file in the corpus.
"""
return utils.load_txt(self.abspath(filename))
def __init__(self, name=None):
if not name:
raise Exception("You need to input the name of the corpus")
else:
self.name = name
kolaw = CorpusLoader('kolaw')
kobill = CorpusLoader('kobill')
| gpl-3.0 |
ErickMurillo/aprocacaho | organizacion/admin.py | 1 | 3456 | from django.contrib import admin
from .models import *
# Register your models here.
# organization (Organizacion) admin
class InlineEscuelaCampo(admin.TabularInline):
model = EscuelaCampo
extra = 1
class OrganizacionAdmin(admin.ModelAdmin):
inlines = [InlineEscuelaCampo]
list_display = ('id','nombre','siglas')
list_display_links = ('id','nombre','siglas')
# organization survey (EncuestaOrganicacion) admin
class InlineAspectosJuridicos(admin.TabularInline):
model = AspectosJuridicos
max_num = 1
can_delete = False
class InlineListaMiembros(admin.TabularInline):
model = ListaMiembros
extra = 1
class InlineDocumentacion(admin.TabularInline):
model = Documentacion
extra = 1
max_num = 7
class InlineProduccionComercializacion(admin.TabularInline):
model = ProduccionComercializacion
extra = 1
class InlineNivelCumplimiento(admin.TabularInline):
model = NivelCumplimiento
extra = 1
max_num = 7
# class InlineDatosProductivos(admin.TabularInline):
# model = DatosProductivos
# extra = 1
# max_num = 4
#
# class InlineDatosProductivosTabla(admin.TabularInline):
# model = DatosProductivosTabla
# extra = 1
# max_num = 2
class InlineInfraestructura(admin.TabularInline):
model = Infraestructura
extra = 1
class InlineTransporte(admin.TabularInline):
model = Transporte
max_num = 1
can_delete = False
# class InlineComercializacion(admin.TabularInline):
# model = Comercializacion
# extra = 1
# max_num = 3
#
# class InlineCacaoComercializado(admin.TabularInline):
# model = CacaoComercializado
# max_num = 1
# can_delete = False
class InlineCertificacionOrg(admin.TabularInline):
model = CertificacionOrg
max_num = 1
can_delete = False
class InlineDestinoProdCorriente(admin.TabularInline):
model = DestinoProdCorriente
extra = 1
max_num = 4
class InlineDestinoProdFermentado(admin.TabularInline):
model = DestinoProdFermentado
extra = 1
max_num = 4
class InlineFinanciamiento(admin.TabularInline):
model = Financiamiento
max_num = 1
can_delete = False
class InlineFinanciamientoProductores(admin.TabularInline):
model = FinanciamientoProductores
extra = 1
max_num = 5
class InlineInfoFinanciamiento(admin.TabularInline):
model = InfoFinanciamiento
extra = 1
max_num = 4
class EncuestaOrganicacionAdmin(admin.ModelAdmin):
# def get_queryset(self, request):
# if request.user.is_superuser:
# return EncuestaOrganicacion.objects.all()
# return EncuestaOrganicacion.objects.filter(usuario=request.user)
def save_model(self, request, obj, form, change):
obj.usuario = request.user
obj.save()
inlines = [InlineAspectosJuridicos,InlineListaMiembros,InlineDocumentacion,
InlineNivelCumplimiento,InlineProduccionComercializacion,
InlineInfraestructura,InlineTransporte,
InlineCertificacionOrg,InlineDestinoProdCorriente,InlineDestinoProdFermentado,
InlineFinanciamiento,InlineFinanciamientoProductores,InlineInfoFinanciamiento]
list_display = ('id','organizacion','fecha')
list_display_links = ('id','organizacion')
class Media:
css = {
'all': ('css/admin.css',)
}
js = ('js/admin_org.js',)
admin.site.register(Organizacion,OrganizacionAdmin)
admin.site.register(EncuestaOrganicacion,EncuestaOrganicacionAdmin)
| mit |
laurenrevere/osf.io | osf_tests/conftest.py | 6 | 2359 | import logging
import pytest
from faker import Factory
from framework.django.handlers import handlers as django_handlers
from framework.flask import rm_handlers
from website import settings
from website.app import init_app
from website.project.signals import contributor_added
from website.project.views.contributor import notify_added_contributor
# Silence some 3rd-party logging and some "loud" internal loggers
SILENT_LOGGERS = [
'api.caching.tasks',
'factory.generate',
'factory.containers',
'framework.analytics',
'framework.auth.core',
'website.app',
'website.archiver.tasks',
'website.mails',
'website.notifications.listeners',
'website.search.elastic_search',
'website.search_migration.migrate',
'website.util.paths',
'requests_oauthlib.oauth2_session',
'raven.base.Client',
'raven.contrib.django.client.DjangoClient',
'transitions.core',
'MARKDOWN',
]
for logger_name in SILENT_LOGGERS:
logging.getLogger(logger_name).setLevel(logging.CRITICAL)
# NOTE: autouse so that ADDONS_REQUESTED gets set on website.settings
@pytest.fixture(autouse=True, scope='session')
def app():
try:
test_app = init_app(routes=True, set_backends=False)
except AssertionError: # Routes have already been set up
test_app = init_app(routes=False, set_backends=False)
rm_handlers(test_app, django_handlers)
test_app.testing = True
return test_app
@pytest.yield_fixture()
def request_context(app):
context = app.test_request_context(headers={
'Remote-Addr': '146.9.219.56',
'User-Agent': 'Mozilla/5.0 (X11; U; SunOS sun4u; en-US; rv:0.9.4.1) Gecko/20020518 Netscape6/6.2.3'
})
context.push()
yield context
context.pop()
DISCONNECTED_SIGNALS = {
# disconnect notify_add_contributor so that add_contributor does not send "fake" emails in tests
contributor_added: [notify_added_contributor]
}
@pytest.fixture(autouse=True)
def disconnected_signals():
for signal in DISCONNECTED_SIGNALS:
for receiver in DISCONNECTED_SIGNALS[signal]:
signal.disconnect(receiver)
@pytest.fixture(autouse=True)
def patched_settings():
"""Patch settings for tests"""
settings.ENABLE_EMAIL_SUBSCRIPTIONS = False
settings.BCRYPT_LOG_ROUNDS = 1
@pytest.fixture()
def fake():
return Factory.create()
| apache-2.0 |
dreamingbinary/rdbms-subsetter | setup.py | 8 | 1365 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
curdir = os.path.dirname(os.path.realpath(__file__))
readme = open(os.path.join(curdir, 'README.rst')).read()
setup(
name='rdbms-subsetter',
version='0.2.1',
description='Generate consistent subset of an RDBMS',
long_description=readme,
author='Catherine Devlin',
author_email='[email protected]',
    url='https://github.com/18F/rdbms-subsetter',
install_requires=[
"sqlalchemy",
],
license="CC0",
keywords='database testing',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Database',
'Topic :: Software Development :: Testing',
],
py_modules=['subsetter'],
entry_points={
'console_scripts': [
'rdbms-subsetter = subsetter:generate',
]
},
)
| cc0-1.0 |
frouty/odoogoeen | addons/l10n_it/__init__.py | 447 | 1161 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010
# OpenERP Italian Community (<http://www.openerp-italia.org>)
# Servabit srl
# Agile Business Group sagl
# Domsense srl
# Albatos srl
#
# Copyright (C) 2011
# Associazione OpenERP Italia (<http://www.openerp-italia.org>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
| agpl-3.0 |
linktlh/Toontown-journey | toontown/toon/InventoryBase.py | 3 | 11970 | from pandac.PandaModules import *
from toontown.toonbase import ToontownGlobals
from toontown.toonbase.ToontownBattleGlobals import *
from direct.showbase import DirectObject
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator
from direct.showbase.PythonUtil import StackTrace
class InventoryBase(DirectObject.DirectObject):
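"""Tracks a toon's gag inventory as a 2-D list indexed by [track][level],
enforcing carry limits and experience-based access, and (de)serializing
to/from a packed byte string for network transfer."""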
notify = DirectNotifyGlobal.directNotify.newCategory('InventoryBase')
def __init__(self, toon, invStr = None):
self._createStack = str(StackTrace().compact())
self.toon = toon
if invStr is None:
self.inventory = []
for track in xrange(0, len(Tracks)):
level = []
for thisLevel in xrange(0, len(Levels[track])):
level.append(0)
self.inventory.append(level)
else:
self.inventory = self.makeFromNetString(invStr)
self.calcTotalProps()
return
def unload(self):
del self.toon
def __str__(self):
retStr = 'totalProps: %d\n' % self.totalProps
for track in xrange(0, len(Tracks)):
retStr += Tracks[track] + ' = ' + str(self.inventory[track]) + '\n'
return retStr
def updateInvString(self, invString):
inventory = self.makeFromNetString(invString)
self.updateInventory(inventory)
return None
def updateInventory(self, inv):
self.inventory = inv
self.calcTotalProps()
def makeNetString(self):
dataList = self.inventory
datagram = PyDatagram()
for track in xrange(0, len(Tracks)):
for level in xrange(0, len(Levels[track])):
datagram.addUint8(dataList[track][level])
dgi = PyDatagramIterator(datagram)
return dgi.getRemainingBytes()
def makeFromNetString(self, netString):
dataList = []
dg = PyDatagram(netString)
dgi = PyDatagramIterator(dg)
for track in xrange(0, len(Tracks)):
subList = []
for level in xrange(0, len(Levels[track])):
if dgi.getRemainingSize() > 0:
value = dgi.getUint8()
else:
value = 0
subList.append(value)
dataList.append(subList)
return dataList
def makeFromNetStringForceSize(self, netString, numTracks, numLevels):
dataList = []
dg = PyDatagram(netString)
dgi = PyDatagramIterator(dg)
for track in xrange(0, numTracks):
subList = []
for level in xrange(0, numLevels):
if dgi.getRemainingSize() > 0:
value = dgi.getUint8()
else:
value = 0
subList.append(value)
dataList.append(subList)
return dataList
def addItem(self, track, level):
return self.addItems(track, level, 1)
def addItems(self, track, level, amount):
if isinstance(track, str):
track = Tracks.index(track)
max = self.getMax(track, level)
if (not hasattr(self.toon, 'experience')) or (not hasattr(self.toon.experience, 'getExpLevel')):
return 0
if not (self.toon.experience.getExpLevel(track) >= level and self.toon.hasTrackAccess(track)):
return 0
if self.numItem(track, level) > max - amount:
return -1
if not (self.totalProps + amount <= self.toon.getMaxCarry() or level > LAST_REGULAR_GAG_LEVEL):
return -2
self.inventory[track][level] += amount
self.totalProps += amount
return self.inventory[track][level]
def addItemWithList(self, track, levelList):
for level in levelList:
self.addItem(track, level)
def numItem(self, track, level):
if isinstance(track, str):
track = Tracks.index(track)
if track > len(Tracks) - 1 or level > len(Levels) - 1:
self.notify.warning("%s is using a gag that doesn't exist %s %s!" % (self.toon.doId, track, level))
return -1
return self.inventory[track][level]
def useItem(self, track, level):
if isinstance(track, str):
track = Tracks.index(track)
if self.numItem(track, level) > 0:
self.inventory[track][level] -= 1
self.calcTotalProps()
return 1
elif self.numItem(track, level) == -1:
return -1
def setItem(self, track, level, amount):
if isinstance(track, str):
track = Tracks.index(track)
max = self.getMax(track, level)
curAmount = self.numItem(track, level)
if self.toon.experience.getExpLevel(track) >= level:
if amount <= max:
if self.totalProps - curAmount + amount <= self.toon.getMaxCarry():
self.inventory[track][level] = amount
self.totalProps = self.totalProps - curAmount + amount
return self.inventory[track][level]
else:
return -2
else:
return -1
else:
return 0
def getMax(self, track, level):
if isinstance(track, str):
track = Tracks.index(track)
maxList = CarryLimits[track]
if self.toon.experience:
return maxList[self.toon.experience.getExpLevel(track)][level]
else:
return 0
def getTrackAndLevel(self, propName):
for track in xrange(0, len(Tracks)):
if AvProps[track].count(propName):
return (track, AvProps[track].index(propName))
return (-1, -1)
def calcTotalProps(self):
self.totalProps = 0
for track in xrange(0, len(Tracks)):
for level in xrange(0, len(Levels[track])):
if level <= LAST_REGULAR_GAG_LEVEL:
self.totalProps += self.numItem(track, level)
return None
def countPropsInList(self, invList):
totalProps = 0
for track in xrange(len(Tracks)):
for level in xrange(len(Levels[track])):
if level <= LAST_REGULAR_GAG_LEVEL:
totalProps += invList[track][level]
return totalProps
def setToMin(self, newInventory):
for track in xrange(len(Tracks)):
for level in xrange(len(Levels[track])):
self.inventory[track][level] = min(self.inventory[track][level], newInventory[track][level])
self.calcTotalProps()
return None
def validateItemsBasedOnExp(self, newInventory, allowUber = 0):
if isinstance(newInventory, str):
tempInv = self.makeFromNetString(newInventory)
else:
tempInv = newInventory
for track in xrange(len(Tracks)):
for level in xrange(len(Levels[track])):
if tempInv[track][level] > self.getMax(track, level):
return 0
if tempInv[track][level] > 0 and not self.toon.hasTrackAccess(track):
commentStr = "Player %s trying to purchase gag they don't have track access to. track: %s level: %s" % (self.toon.doId, track, level)
dislId = self.toon.DISLid
if simbase.config.GetBool('want-ban-gagtrack', False):
#simbase.air.banManager.ban(self.toon.doId, dislId, commentStr)
pass
return 0
if level > LAST_REGULAR_GAG_LEVEL and tempInv[track][level] > self.inventory[track][level] or allowUber:
return 0
return 1
def validateItemsBasedOnAccess(self, newInventory):
return 1
def getMinCostOfPurchase(self, newInventory):
return self.countPropsInList(newInventory) - self.totalProps
def validatePurchase(self, newInventory, currentMoney, newMoney):
if newMoney > currentMoney:
self.notify.warning('Somebody lied about their money! Rejecting purchase.')
return 0
newItemTotal = self.countPropsInList(newInventory)
oldItemTotal = self.totalProps
if newItemTotal > oldItemTotal + currentMoney:
self.notify.warning('Somebody overspent! Rejecting purchase.')
return 0
if newItemTotal - oldItemTotal > currentMoney - newMoney:
self.notify.warning('Too many items based on money spent! Rejecting purchase.')
return 0
if newItemTotal > self.toon.getMaxCarry():
self.notify.warning('Cannot carry %s items! Rejecting purchase.' % newItemTotal)
return 0
if not self.validateItemsBasedOnExp(newInventory):
self.notify.warning('Somebody is trying to buy forbidden items! ' + 'Rejecting purchase.')
return 0
if not self.validateItemsBasedOnAccess(newInventory):
simbase.air.writeServerEvent('suspicious', self.toon.doId, 'non-paid av trying to purchase paid gags')
return 0
self.updateInventory(newInventory)
return 1
def maxOutInv(self, filterUberGags = 0, filterPaidGags = 0):
unpaid = self.toon.getGameAccess() != ToontownGlobals.AccessFull
for track in xrange(len(Tracks)):
if self.toon.hasTrackAccess(track):
for level in xrange(len(Levels[track])):
if level <= LAST_REGULAR_GAG_LEVEL or not filterUberGags:
if not filterPaidGags or not (unpaid and gagIsPaidOnly(track, level)):
self.addItem(track, level)
addedAnything = 1
while addedAnything:
addedAnything = 0
result = 0
for track in xrange(len(Tracks)):
if self.toon.hasTrackAccess(track):
level = len(Levels[track]) - 1
if level > LAST_REGULAR_GAG_LEVEL and filterUberGags:
level = LAST_REGULAR_GAG_LEVEL
if not filterPaidGags or not (unpaid and gagIsPaidOnly(track, level)):
result = self.addItem(track, level)
level -= 1
while result <= 0 and level >= 0:
if not filterPaidGags or not (unpaid and gagIsPaidOnly(track, level)):
result = self.addItem(track, level)
level -= 1
if result > 0:
addedAnything = 1
self.calcTotalProps()
return None
def NPCMaxOutInv(self, targetTrack=-1, maxLevelIndex=5):
result = 0
for level in xrange(maxLevelIndex, -1, -1):
anySpotsAvailable = 1
while anySpotsAvailable == 1:
anySpotsAvailable = 0
trackResults = []
for track in xrange(len(Tracks)):
if targetTrack != -1 and targetTrack != track:
continue
result = self.addItem(track, level)
trackResults.append(result)
if result == -2:
break
for res in trackResults:
if res > 0:
anySpotsAvailable = 1
if result == -2:
break
self.calcTotalProps()
return None
def zeroInv(self, killUber = 0):
for track in xrange(len(Tracks)):
for level in xrange(UBER_GAG_LEVEL_INDEX):
self.inventory[track][level] = 0
if killUber:
self.inventory[track][UBER_GAG_LEVEL_INDEX] = 0
if self.inventory[track][UBER_GAG_LEVEL_INDEX] > 1:
self.inventory[track][UBER_GAG_LEVEL_INDEX] = 1
self.calcTotalProps()
return None
def _garbageInfo(self):
return self._createStack
| apache-2.0 |
srio/shadow3-scripts | transfocator_id30b.py | 1 | 25823 | import numpy
import xraylib
"""
transfocator_id30b : transfocator for ID30B.
It can:
1) guess the lens configuration (number of lenses for each type) for a given photon energy
and target image size. Use transfocator_compute_configuration() for this task
2) for a given transfocator configuration, compute the main optical parameters
(image size, focal distance, focal position and divergence).
Use transfocator_compute_parameters() for this task
3) Performs full ray tracing. Use id30b_ray_tracing() for this task
Note that for the optimization and parameter calculations the transfocator configuration is
given via keywords. For ray-tracing calculations many parameters of the transfocator are
hard-coded with the values of ID30B.
See main program for examples.
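A quick usage sketch (illustrative numbers, not a measured ID30B setup; all
other arguments take their defaults):

    nlenses = transfocator_compute_configuration(14000.0, 20e-4)  # 14 keV, 20 um target
    size, f, q_f, div = transfocator_compute_parameters(14000.0, nlenses)
    slots = transfocator_nlenses_to_slots(nlenses, nlenses_max=[15,3,1])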
Dependencies:
Numpy
xraylib (to compute refraction indices)
Shadow (for ray tracing only)
matplotlib (for some plots of the ray-tracing results)
Side effects:
When running ray tracing some files are created.
MODIFICATION HISTORY:
2015-03-25 [email protected], written
"""
__author__ = "Manuel Sanchez del Rio"
__contact__ = "[email protected]"
__copyright__ = "ESRF, 2015"
def transfocator_compute_configuration(photon_energy_ev,s_target,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800, verbose=1 ):
"""
Computes the optimum transfocator configuration for a given photon energy and target image size.
All length units are cm
:param photon_energy_ev: the photon energy in eV
:param s_target: the target image size in cm.
:param symbol: the chemical symbol of the lens material of each type. Default symbol=["Be","Be","Be"]
:param density: the density of each type of lens. Default: density=[1.845,1.845,1.845]
:param nlenses_max: the maximum allowed number of lenases for each type of lens. nlenses_max = [15,3,1]
:param nlenses_radii: the radii in cm of each type of lens. Default: nlenses_radii = [500e-4,1000e-4,1500e-4]
:param lens_diameter: the physical diameter (acceptance) in cm of the lenses. If different for each type of lens,
consider the smaller one. Default: lens_diameter=0.05
:param sigmaz: the sigma (standard deviation) of the source in cm
:param alpha: an adjustable parameter in [0,1](see doc). Default: 0.55 (it is 0.76 for pure Gaussian beams)
:param tf_p: the distance source-transfocator in cm
:param tf_q: the distance transfocator-image in cm
:param:verbose: set to 1 for verbose text output
:return: a list with the number of lenses of each type.
"""
if s_target < 2.35*sigmaz*tf_q/tf_p:
print("Source size FWHM is: %f um"%(1e4*2.35*sigmaz))
print("Maximum Demagnifications is: %f um"%(tf_p/tf_q))
print("Minimum possible size is: %f um"%(1e4*2.35*sigmaz*tf_q/tf_p))
print("Error: redefine size")
return None
deltas = [(1.0 - xraylib.Refractive_Index_Re(symbol[i],photon_energy_ev*1e-3,density[i])) \
for i in range(len(symbol))]
focal_q_target = _tansfocator_guess_focal_position( s_target, p=tf_p, q=tf_q, sigmaz=sigmaz, alpha=alpha, \
lens_diameter=lens_diameter,method=2)
focal_f_target = 1.0 / (1.0/focal_q_target + 1.0/tf_p)
div_q_target = alpha * lens_diameter / focal_q_target
#corrections for extreme cases
source_demagnified = 2.35*sigmaz*focal_q_target/tf_p
if source_demagnified > lens_diameter: source_demagnified = lens_diameter
s_target_calc = numpy.sqrt( (div_q_target*(tf_q-focal_q_target))**2 + source_demagnified**2)
nlenses_target = _transfocator_guess_configuration(focal_f_target,deltas=deltas,\
nlenses_max=nlenses_max,radii=nlenses_radii, )
if verbose:
print("transfocator_compute_configuration: focal_f_target: %f"%(focal_f_target))
print("transfocator_compute_configuration: focal_q_target: %f cm"%(focal_q_target))
print("transfocator_compute_configuration: s_target: %f um"%(s_target_calc*1e4))
print("transfocator_compute_configuration: nlenses_target: ",nlenses_target)
return nlenses_target
def transfocator_compute_parameters(photon_energy_ev, nlenses_target,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800 ):
"""
Computes the optical performance parameters of a given transfocator configuration.
All length units are cm
:param photon_energy_ev:
:param nlenses_target: a list with the lens configuration, i.e. the number of lenses of each type.
:param symbol: the chemical symbol of the lens material of each type. Default symbol=["Be","Be","Be"]
:param density: the density of each type of lens. Default: density=[1.845,1.845,1.845]
:param nlenses_max: the maximum allowed number of lenases for each type of lens. nlenses_max = [15,3,1]
TODO: remove (not used)
:param nlenses_radii: the radii in cm of each type of lens. Default: nlenses_radii = [500e-4,1000e-4,1500e-4]
:param lens_diameter: the physical diameter (acceptance) in cm of the lenses. If different for each type of lens,
consider the smaller one. Default: lens_diameter=0.05
:param sigmaz: the sigma (standard deviation) of the source in cm
:param alpha: an adjustable parameter in [0,1](see doc). Default: 0.55 (it is 0.76 for pure Gaussian beams)
:param tf_p: the distance source-transfocator in cm
:param tf_q: the distance transfocator-image in cm
:return: a tuple with parameters (image_size, lens_focal_distance,
focal_position from transfocator center, divergence of beam after the transfocator)
"""
deltas = [(1.0 - xraylib.Refractive_Index_Re(symbol[i],photon_energy_ev*1e-3,density[i])) \
for i in range(len(symbol))]
focal_f = _transfocator_calculate_focal_distance( deltas=deltas,\
nlenses=nlenses_target,radii=nlenses_radii)
focal_q = 1.0 / (1.0/focal_f - 1.0/tf_p)
div_q = alpha * lens_diameter / focal_q
#corrections
source_demagnified = 2.35*sigmaz*focal_q/tf_p
if source_demagnified > lens_diameter: source_demagnified = lens_diameter
s_target = numpy.sqrt( (div_q*(tf_q-focal_q))**2 + (source_demagnified)**2 )
return (s_target,focal_f,focal_q,div_q)
def transfocator_nlenses_to_slots(nlenses,nlenses_max=None):
"""
converts the transfocator configuration from a list of the number of lenses of each type,
into a list of active (1) or inactive (0) actuators for the slots.
:param nlenses: the list with the number of lenses of each type (e.g., [5,2,0])
:param nlenses_max: the maximum number of lenses of each type, usually powers of two minus one.
E.g. [15,3,1]
:return: a list of on (1) and off (0) slots, e.g., [1, 0, 1, 0, 0, 1, 0]
(first type: 1*1+0*2+1*4+0*8=5, second type: 0*1+1*2=2, third type: 0*1=0)
"""
if nlenses_max == None:
nlenses_max = nlenses
ss = []
for i,iopt in enumerate(nlenses):
if iopt > nlenses_max[i]:
print("Error: i:%d, nlenses: %d, nlenses_max: %d"%(i,iopt,nlenses_max[i]))
ncharacters = len("{0:b}".format(nlenses_max[i]))
si = list( ("{0:0%db}"%(ncharacters)).format(int(iopt)) )
si.reverse()
ss += si
on_off = [int(i) for i in ss]
#print("transfocator_nlenses_to_slots: nlenses_max: ",nlenses_max," nlenses: ",nlenses," slots: ",on_off)
return on_off
def _transfocator_calculate_focal_distance(deltas=[0.999998],nlenses=[1],radii=[500e-4]):
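# Thin-lens CRL optics: a stack of N parabolic lenses with apex radius R and
# refractive-index decrement delta gives 1/f_i = 2*N*delta/R; stacks in series
# add as 1/f = sum_i(1/f_i), which is what this loop accumulates.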
inverse_focal_distance = 0.0
for i,nlensesi in enumerate(nlenses):
if nlensesi > 0:
focal_distance_i = radii[i] / (2.*nlensesi*deltas[i])
inverse_focal_distance += 1.0/focal_distance_i
if inverse_focal_distance == 0:
return 99999999999999999999999999.
else:
return 1.0/inverse_focal_distance
def _tansfocator_guess_focal_position( s_target, p=5960., q=3800.0, sigmaz=6.46e-4, \
alpha=0.66, lens_diameter=0.05, method=2):
x = 1e15
if method == 1: # simple sum
AA = 2.35*sigmaz/p
BB = -(s_target + alpha * lens_diameter)
CC = alpha*lens_diameter*q
cc = numpy.roots([AA,BB,CC])
x = cc[1]
return x
if method == 2: # sum in quadrature
AA = ( (2.35*sigmaz)**2)/(p**2)
BB = 0.0
CC = alpha**2 * lens_diameter**2 - s_target**2
DD = - 2.0 * alpha**2 * lens_diameter**2 * q
EE = alpha**2 * lens_diameter**2 * q**2
cc = numpy.roots([AA,BB,CC,DD,EE])
for i,cci in enumerate(cc):
if numpy.imag(cci) == 0:
return numpy.real(cci)
return x
def _transfocator_guess_configuration(focal_f_target,deltas=[0.999998],nlenses_max=[15],radii=[500e-4]):
nn = len(nlenses_max)
ncombinations = (1+nlenses_max[0]) * (1+nlenses_max[1]) * (1+nlenses_max[2])
icombinations = 0
aa = numpy.zeros((3,ncombinations),dtype=int)
bb = numpy.zeros(ncombinations)
for i0 in range(1+nlenses_max[0]):
for i1 in range(1+nlenses_max[1]):
for i2 in range(1+nlenses_max[2]):
aa[0,icombinations] = i0
aa[1,icombinations] = i1
aa[2,icombinations] = i2
bb[icombinations] = focal_f_target - _transfocator_calculate_focal_distance(deltas=deltas,nlenses=[i0,i1,i2],radii=radii)
icombinations += 1
bb1 = numpy.abs(bb)
ibest = bb1.argmin()
return (aa[:,ibest]).tolist()
#
#
#
def id30b_ray_tracing(emittH=4e-9,emittV=1e-11,betaH=35.6,betaV=3.0,number_of_rays=50000,\
density=1.845,symbol="Be",tf_p=1000.0,tf_q=1000.0,lens_diameter=0.05,\
slots_max=None,slots_on_off=None,photon_energy_ev=14000.0,\
slots_lens_thickness=None,slots_steps=None,slots_radii=None,\
s_target=10e-4,focal_f=10.0,focal_q=10.0,div_q=1e-6):
#=======================================================================================================================
# Gaussian undulator source
#=======================================================================================================================
import Shadow
#import Shadow.ShadowPreprocessorsXraylib as sx
sigmaXp = numpy.sqrt(emittH/betaH)
sigmaZp = numpy.sqrt(emittV/betaV)
sigmaX = emittH/sigmaXp
sigmaZ = emittV/sigmaZp
print("\n\nElectron sizes H:%f um, V:%fu m;\nelectron divergences: H:%f urad, V:%f urad"%\
(sigmaX*1e6, sigmaZ*1e6, sigmaXp*1e6, sigmaZp*1e6))
# set Gaussian source
src = Shadow.Source()
src.set_energy_monochromatic(photon_energy_ev)
src.set_gauss(sigmaX*1e2,sigmaZ*1e2,sigmaXp,sigmaZp)
print("\n\nElectron sizes stored H:%f um, V:%f um;\nelectron divergences: H:%f urad, V:%f urad"%\
(src.SIGMAX*1e4,src.SIGMAZ*1e4,src.SIGDIX*1e6,src.SIGDIZ*1e6))
src.apply_gaussian_undulator(undulator_length_in_m=2.8, user_unit_to_m=1e-2, verbose=1)
print("\n\nElectron sizes stored (undulator) H:%f um, V:%f um;\nelectron divergences: H:%f urad, V:%f urad"%\
(src.SIGMAX*1e4,src.SIGMAZ*1e4,src.SIGDIX*1e6,src.SIGDIZ*1e6))
print("\n\nSource size in vertical FWHM: %f um\n"%\
(2.35*src.SIGMAZ*1e4))
src.NPOINT = number_of_rays
src.ISTAR1 = 0 # 677543155
src.write("start.00")
# create source
beam = Shadow.Beam()
beam.genSource(src)
beam.write("begin.dat")
src.write("end.00")
#=======================================================================================================================
# complete the (detailed) transfocator description
#=======================================================================================================================
print("\nSetting detailed Transfocator for ID30B")
slots_nlenses = numpy.array(slots_max)*numpy.array(slots_on_off)
slots_empty = (numpy.array(slots_max)-slots_nlenses)
#
####interactive=True, SYMBOL="SiC",DENSITY=3.217,FILE="prerefl.dat",E_MIN=100.0,E_MAX=20000.0,E_STEP=100.0
Shadow.ShadowPreprocessorsXraylib.prerefl(interactive=False,E_MIN=2000.0,E_MAX=55000.0,E_STEP=100.0,\
DENSITY=density,SYMBOL=symbol,FILE="Be2_55.dat" )
nslots = len(slots_max)
prerefl_file = ["Be2_55.dat" for i in range(nslots)]
print("slots_max: ",slots_max)
#print("slots_target: ",slots_target)
print("slots_on_off: ",slots_on_off)
print("slots_steps: ",slots_steps)
print("slots_radii: ",slots_radii)
print("slots_nlenses: ",slots_nlenses)
print("slots_empty: ",slots_empty)
#calculate distances, nlenses and slots_empty
# these are distances p and q with TF length removed
tf_length = numpy.array(slots_steps).sum() #tf length in cm
tf_fs_before = tf_p - 0.5*tf_length #distance from source to center of transfocator
tf_fs_after = tf_q - 0.5*tf_length # distance from center of transfocator to image
# for each slot, these are the empty distances before and after the lenses
tf_p0 = numpy.zeros(nslots)
tf_q0 = numpy.array(slots_steps) - (numpy.array(slots_max) * slots_lens_thickness)
# add now the p q distances
tf_p0[0] += tf_fs_before
tf_q0[-1] += tf_fs_after
print("tf_p0: ",tf_p0)
print("tf_q0: ",tf_q0)
print("tf_length: %f cm"%(tf_length))
# build transfocator
tf = Shadow.CompoundOE(name='TF ID30B')
tf.append_transfocator(tf_p0.tolist(), tf_q0.tolist(), \
nlenses=slots_nlenses.tolist(), radius=slots_radii, slots_empty=slots_empty.tolist(),\
thickness=slots_lens_thickness, prerefl_file=prerefl_file,\
surface_shape=4, convex_to_the_beam=0, diameter=lens_diameter,\
cylinder_angle=0.0,interthickness=50e-4,use_ccc=0)
itmp = input("SHADOW Source complete. Do you want to run SHADOR trace? [1=Yes,0=No]: ")
if str(itmp) != "1":
return
#trace system
tf.dump_systemfile()
beam.traceCompoundOE(tf,write_start_files=0,write_end_files=0,write_star_files=0, write_mirr_files=0)
#write only last result file
beam.write("star_tf.dat")
print("\nFile written to disk: star_tf.dat")
#
# #ideal calculations
#
print("\n\n\n")
print("=============================================== TRANSFOCATOR OUTPUTS ==========================================")
print("\nTHEORETICAL results: ")
print("REMIND-----With these lenses we obtained (analytically): ")
print("REMIND----- focal_f: %f cm"%(focal_f))
print("REMIND----- focal_q: %f cm"%(focal_q))
print("REMIND----- s_target: %f um"%(s_target*1e4))
demagnification_factor = tf_p/focal_q
theoretical_focal_size = src.SIGMAZ*2.35/demagnification_factor
# analyze shadow results
print("\nSHADOW results: ")
st1 = beam.get_standard_deviation(3,ref=0)
st2 = beam.get_standard_deviation(3,ref=1)
print(" stDev*2.35: unweighted: %f um, weighted: %f um "%(st1*2.35*1e4,st2*2.35*1e4))
tk = beam.histo1(3, nbins=75, ref=1, nolost=1, write="HISTO1")
print(" Histogram FWHM: %f um "%(1e4*tk["fwhm"]))
print(" Transmitted intensity: %f (source was: %d) (transmission is %f %%) "%(beam.intensity(nolost=1), src.NPOINT, beam.intensity(nolost=1)/src.NPOINT*100))
#scan around image
xx1 = numpy.linspace(0.0,1.1*tf_fs_after,11) # position from TF exit plane
#xx0 = focal_q - tf_length*0.5
xx0 = focal_q - tf_length*0.5 # position of focus from TF exit plane
xx2 = numpy.linspace(xx0-100.0,xx0+100,21) # position from TF exit plane
xx3 = numpy.array([tf_fs_after])
xx = numpy.concatenate(([-0.5*tf_length],xx1,xx2,[tf_fs_after]))
xx.sort()
f = open("id30b.spec","w")
f.write("#F id30b.spec\n")
f.write("\n#S 1 calculations for id30b transfocator\n")
f.write("#N 8\n")
labels = " %18s %18s %18s %18s %18s %18s %18s %18s"%\
("pos from source","pos from image","[pos from TF]", "pos from TF center", "pos from focus",\
"fwhm shadow(stdev)","fwhm shadow(histo)","fwhm theoretical")
f.write("#L "+labels+"\n")
out = numpy.zeros((8,xx.size))
for i,pos in enumerate(xx):
beam2 = beam.duplicate()
beam2.retrace(-tf_fs_after+pos)
fwhm1 = 2.35*1e4*beam2.get_standard_deviation(3,ref=1,nolost=1)
tk = beam2.histo1(3, nbins=75, ref=1, nolost=1)
fwhm2 = 1e4*tk["fwhm"]
#fwhm_th = 1e4*transfocator_calculate_estimated_size(pos,diameter=diameter,focal_distance=focal_q)
fwhm_th2 = 1e4*numpy.sqrt( (div_q*(pos+0.5*tf_length-focal_q))**2 + theoretical_focal_size**2 )
#fwhm_th2 = 1e4*( numpy.abs(div_q*(pos-focal_q+0.5*tf_length)) + theoretical_focal_size )
out[0,i] = tf_fs_before+tf_length+pos
out[1,i] = -tf_fs_after+pos
out[2,i] = pos
out[3,i] = pos+0.5*tf_length
out[4,i] = pos+0.5*tf_length-focal_q
out[5,i] = fwhm1
out[6,i] = fwhm2
out[7,i] = fwhm_th2
f.write(" %18.3f %18.3f %18.3f %18.3f %18.3f %18.3f %18.3f %18.3f \n"%\
(tf_fs_before+tf_length+pos,\
-tf_fs_after+pos,\
pos,\
pos+0.5*tf_length,\
pos+0.5*tf_length-focal_q,\
fwhm1,fwhm2,fwhm_th2))
f.close()
print("File with beam evolution written to disk: id30b.spec")
#
# plots
#
itmp = input("Do you want to plot the intensity distribution and beam evolution? [1=yes,0=No]")
if str(itmp) != "1":
return
import matplotlib.pylab as plt
plt.figure(1)
plt.plot(out[1,:],out[5,:],'blue',label="fwhm shadow(stdev)")
plt.plot(out[1,:],out[6,:],'green',label="fwhm shadow(histo1)")
plt.plot(out[1,:],out[7,:],'red',label="fwhm theoretical")
plt.xlabel("Distance from image plane [cm]")
plt.ylabel("spot size [um] ")
ax = plt.subplot(111)
ax.legend(bbox_to_anchor=(1.1, 1.05))
print("Kill graphic to continue.")
plt.show()
Shadow.ShadowTools.histo1(beam,3,nbins=75,ref=1,nolost=1,calfwhm=1)
input("<Enter> to finish.")
return None
def id30b_full_simulation(photon_energy_ev=14000.0,s_target=20.0e-4,nlenses_target=None):
if nlenses_target == None:
force_nlenses = 0
else:
force_nlenses = 1
#
# define lens setup (general)
#
xrl_symbol = ["Be","Be","Be"]
xrl_density = [1.845,1.845,1.845]
lens_diameter = 0.05
nlenses_max = [15,3,1]
nlenses_radii = [500e-4,1000e-4,1500e-4]
sigmaz=6.46e-4
alpha = 0.55
tf_p = 5960 # position of the TF measured from the center of the transfocator
tf_q = 9760 - tf_p # position of the image plane measured from the center of the transfocator
if s_target < 2.35*sigmaz*tf_q/tf_p:
print("Source size FWHM is: %f um"%(1e4*2.35*sigmaz))
print("Maximum Demagnifications is: %f um"%(tf_p/tf_q))
print("Minimum possible size is: %f um"%(1e4*2.35*sigmaz*tf_q/tf_p))
print("Error: redefine size")
return
print("================================== TRANSFOCATOR INPUTS ")
print("Photon energy: %f eV"%(photon_energy_ev))
if force_nlenses:
print("Forced_nlenses: ",nlenses_target)
else:
print("target size: %f cm"%(s_target))
print("materials: ",xrl_symbol)
print("densities: ",xrl_density)
print("Lens diameter: %f cm"%(lens_diameter))
print("nlenses_max:",nlenses_max,"nlenses_radii: ",nlenses_radii)
print("Source size (sigma): %f um, FWHM: %f um"%(1e4*sigmaz,2.35*1e4*sigmaz))
print("Distances: tf_p: %f cm, tf_q: %f cm"%(tf_p,tf_q))
print("alpha: %f"%(alpha))
print("========================================================")
if force_nlenses != 1:
nlenses_target = transfocator_compute_configuration(photon_energy_ev,s_target,\
symbol=xrl_symbol,density=xrl_density,\
nlenses_max=nlenses_max, nlenses_radii=nlenses_radii, lens_diameter=lens_diameter, \
sigmaz=sigmaz, alpha=alpha, \
tf_p=tf_p,tf_q=tf_q, verbose=1)
(s_target,focal_f,focal_q,div_q) = \
transfocator_compute_parameters(photon_energy_ev, nlenses_target,\
symbol=xrl_symbol,density=xrl_density,\
nlenses_max=nlenses_max, nlenses_radii=nlenses_radii, \
lens_diameter=lens_diameter,\
sigmaz=sigmaz, alpha=alpha,\
tf_p=tf_p,tf_q=tf_q)
slots_max = [ 1, 2, 4, 8, 1, 2, 1] # slots
slots_on_off = transfocator_nlenses_to_slots(nlenses_target,nlenses_max=nlenses_max)
print("=============================== TRANSFOCATOR SET")
#print("deltas: ",deltas)
if force_nlenses != 1:
print("nlenses_target (optimized): ",nlenses_target)
else:
print("nlenses_target (forced): ",nlenses_target)
print("With these lenses we obtain: ")
print(" focal_f: %f cm"%(focal_f))
print(" focal_q: %f cm"%(focal_q))
print(" s_target: %f um"%(s_target*1e4))
print(" slots_max: ",slots_max)
print(" slots_on_off: ",slots_on_off)
print("==================================================")
# for theoretical calculations use the focal position and distances given by the target nlenses
itmp = input("Start SHADOW simulation? [1=yes,0=No]: ")
if str(itmp) != "1":
return
#=======================================================================================================================
# Inputs
#=======================================================================================================================
emittH = 3.9e-9
emittV = 10e-12
betaH = 35.6
betaV = 3.0
number_of_rays = 50000
nslots = len(slots_max)
slots_lens_thickness = [0.3 for i in range(nslots)] #total thickness of a single lens in cm
# for each slot, positional gap of the first lens in cm
slots_steps = [ 4, 4, 1.9, 6.1, 4, 4, slots_lens_thickness[-1]]
slots_radii = [.05, .05, .05, .05, 0.1, 0.1, 0.15] # radii of the lenses in cm
id30b_ray_tracing(emittH=emittH,emittV=emittV,betaH=betaH,betaV=betaV,number_of_rays=number_of_rays,\
density=xrl_density[0],symbol=xrl_symbol[0],tf_p=tf_p,tf_q=tf_q,lens_diameter=lens_diameter,\
slots_max=slots_max,slots_on_off=slots_on_off,photon_energy_ev=photon_energy_ev,\
slots_lens_thickness=slots_lens_thickness,slots_steps=slots_steps,slots_radii=slots_radii,\
s_target=s_target,focal_f=focal_f,focal_q=focal_q,div_q=div_q)
def main():
# this performs the full simulation: calculates the optimum configuration and do the ray-tracing
itmp = input("Enter: \n 0 = optimization calculation only \n 1 = full simulation (ray tracing) \n?> ")
photon_energy_kev = float(input("Enter photon energy in keV: "))
s_target_um = float(input("Enter target focal dimension in microns: "))
if str(itmp) == "1":
id30b_full_simulation(photon_energy_ev=photon_energy_kev*1e3,s_target=s_target_um*1e-4,nlenses_target=None)
#id30b_full_simulation(photon_energy_ev=14000.0,s_target=20.0e-4,nlenses_target=[3,1,1])
else:
#this performs the calculation of the optimizad configuration
nlenses_optimum = transfocator_compute_configuration(photon_energy_kev*1e3,s_target_um*1e-4,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800, verbose=0 )
print("Optimum lens configuration is: ",nlenses_optimum)
if nlenses_optimum == None:
return
print("Activate slots: ",transfocator_nlenses_to_slots(nlenses_optimum,nlenses_max=[15,3,1]))
# this calculates the parameters (image size, etc) for a given lens configuration
(size, f, q_f, div) = transfocator_compute_parameters(photon_energy_kev*1e3, nlenses_optimum,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800 )
print("For given configuration ",nlenses_optimum," we get: ")
print(" size: %f cm, focal length: %f cm, focal distance: %f cm, divergence: %f rad: "%(size, f, q_f, div))
if __name__ == "__main__":
main() | mit |
jose187/gh_word_count | gh_word_count/__init__.py | 1 | 2681 |
from ommit_words import list_ommited_words
from re import sub
import operator
class _input_list:
def __init__(self,list_TITLES):
self.list_TITLES = list_TITLES
self.list_remove = list_ommited_words()
def _word_count(self):
# these are all the words that are in the text
dict_words = {}
# now we go through each of the lines
for str_line in self.list_TITLES:
str_raw_line1 = sub('[^a-z0-9 ]','',str_line.lower())
list_line_words = str_raw_line1.split()
for str_word in list_line_words:
# check to see if its in the ommited word list
if str_word not in self.list_remove:
# create new key if it is not there yet
if str_word not in dict_words:
dict_words[str_word] = [1]
# add if is already there
elif str_word in dict_words:
dict_words[str_word].append(1)
sorted_x = sorted(dict_words.iteritems(),
key=operator.itemgetter(1),
reverse=True)
list_OUTPUT = []
for each_item in sorted_x:
int_COUNT = sum(each_item[1])
if int_COUNT > 1:
tup_ONE_COUNT = ('%s' % each_item[0],
'%d' % int_COUNT)
list_OUTPUT.append(tup_ONE_COUNT)
return list_OUTPUT
# gets the top x according to frequency
# returns list
def _get_top(self,int_TOP):
list_TOP_N = []
for str_WORD in self._word_count()[:int_TOP]:
list_TOP_N.append(str_WORD)
return list_TOP_N
# displays the count on the terminal
def _show_count(list_TUPS,entries=0):
if entries == 0:
int_TOP = len(list_TUPS)
else:
int_TOP = entries
print 'Count\tWord\n'
for tup_ITEM in list_TUPS[:int_TOP]:
print '%d\t%s' % (int(tup_ITEM[1]),str(tup_ITEM[0]))
# saves the count to csv file
def _save_counts(list_COUNTS,str_FILE_PATH,entries=0):
if entries == 0:
int_TOP = len(list_COUNTS)
else:
int_TOP = entries
list_OUTPUT = ['"Count","Word"']
for tup_ITEM in list_COUNTS[:int_TOP]:
str_OUTPUT = '%d,"%s"' % (int(tup_ITEM[1]),str(tup_ITEM[0]))
list_OUTPUT.append(str_OUTPUT)
fw_OUTPUT = open(str_FILE_PATH,'w')
fw_OUTPUT.write('\n'.join(list_OUTPUT))
fw_OUTPUT.close()
| bsd-2-clause |
bdh1011/wau | venv/lib/python2.7/site-packages/twisted/internet/_glibbase.py | 27 | 12813 | # -*- test-case-name: twisted.internet.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides base support for Twisted to interact with the glib/gtk
mainloops.
The classes in this module should not be used directly, but rather you should
import gireactor or gtk3reactor for GObject Introspection based applications,
or glib2reactor or gtk2reactor for applications using legacy static bindings.
"""
from __future__ import division, absolute_import
import sys
from zope.interface import implementer
from twisted.internet import base, posixbase, selectreactor
from twisted.internet.interfaces import IReactorFDSet
from twisted.python import log
def ensureNotImported(moduleNames, errorMessage, preventImports=[]):
"""
Check whether the given modules were imported, and if requested, ensure
they will not be importable in the future.
@param moduleNames: A list of module names we make sure aren't imported.
@type moduleNames: C{list} of C{str}
@param preventImports: A list of module name whose future imports should
be prevented.
@type preventImports: C{list} of C{str}
@param errorMessage: Message to use when raising an C{ImportError}.
@type errorMessage: C{str}
@raises: C{ImportError} with given error message if a given module name
has already been imported.
"""
for name in moduleNames:
if sys.modules.get(name) is not None:
raise ImportError(errorMessage)
# Disable module imports to avoid potential problems.
for name in preventImports:
sys.modules[name] = None
class GlibWaker(posixbase._UnixWaker):
"""
Run scheduled events after waking up.
"""
def doRead(self):
posixbase._UnixWaker.doRead(self)
self.reactor._simulate()
@implementer(IReactorFDSet)
class GlibReactorBase(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
"""
Base class for GObject event loop reactors.
Notification for I/O events (reads and writes on file descriptors) is done
by the gobject-based event loop. File descriptors are registered with
gobject with the appropriate flags for read/write/disconnect notification.
Time-based events, the results of C{callLater} and C{callFromThread}, are
handled differently. Rather than registering each event with gobject, a
single gobject timeout is registered for the earliest scheduled event, the
output of C{reactor.timeout()}. For example, if there are timeouts in 1, 2
and 3.4 seconds, a single timeout is registered for 1 second in the
future. When this timeout is hit, C{_simulate} is called, which calls the
appropriate Twisted-level handlers, and a new timeout is added to gobject
by the C{_reschedule} method.
To handle C{callFromThread} events, we use a custom waker that calls
C{_simulate} whenever it wakes up.
@ivar _sources: A dictionary mapping L{FileDescriptor} instances to
GSource handles.
@ivar _reads: A set of L{FileDescriptor} instances currently monitored for
reading.
@ivar _writes: A set of L{FileDescriptor} instances currently monitored for
writing.
@ivar _simtag: A GSource handle for the next L{simulate} call.
"""
# Install a waker that knows it needs to call C{_simulate} in order to run
# callbacks queued from a thread:
_wakerFactory = GlibWaker
def __init__(self, glib_module, gtk_module, useGtk=False):
self._simtag = None
self._reads = set()
self._writes = set()
self._sources = {}
self._glib = glib_module
self._gtk = gtk_module
posixbase.PosixReactorBase.__init__(self)
self._source_remove = self._glib.source_remove
self._timeout_add = self._glib.timeout_add
def _mainquit():
if self._gtk.main_level():
self._gtk.main_quit()
if useGtk:
self._pending = self._gtk.events_pending
self._iteration = self._gtk.main_iteration_do
self._crash = _mainquit
self._run = self._gtk.main
else:
self.context = self._glib.main_context_default()
self._pending = self.context.pending
self._iteration = self.context.iteration
self.loop = self._glib.MainLoop()
self._crash = lambda: self._glib.idle_add(self.loop.quit)
self._run = self.loop.run
def _handleSignals(self):
# First, install SIGINT and friends:
base._SignalReactorMixin._handleSignals(self)
# Next, since certain versions of gtk will clobber our signal handler,
# set all signal handlers again after the event loop has started to
# ensure they're *really* set. We don't call this twice so we don't
# leak file descriptors created in the SIGCHLD initialization:
self.callLater(0, posixbase.PosixReactorBase._handleSignals, self)
# The input_add function in pygtk1 checks for objects with a
# 'fileno' method and, if present, uses the result of that method
# as the input source. The pygtk2 input_add does not do this. The
# function below replicates the pygtk1 functionality.
# In addition, pygtk maps gtk.input_add to _gobject.io_add_watch, and
# g_io_add_watch() takes different condition bitfields than
# gtk_input_add(). We use g_io_add_watch() here in case pygtk fixes this
# bug.
def input_add(self, source, condition, callback):
if hasattr(source, 'fileno'):
# handle python objects
def wrapper(ignored, condition):
return callback(source, condition)
fileno = source.fileno()
else:
fileno = source
wrapper = callback
return self._glib.io_add_watch(
fileno, condition, wrapper,
priority=self._glib.PRIORITY_DEFAULT_IDLE)
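    # For example, input_add(sock, INFLAGS, cb) on a socket object watches
    # sock.fileno() and invokes cb(sock, condition), while passing a raw
    # integer fd uses the fd and the callback unchanged.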
def _ioEventCallback(self, source, condition):
"""
Called by event loop when an I/O event occurs.
"""
log.callWithLogger(
source, self._doReadOrWrite, source, source, condition)
return True # True = don't auto-remove the source
def _add(self, source, primary, other, primaryFlag, otherFlag):
"""
Add the given L{FileDescriptor} for monitoring either for reading or
writing. If the file is already monitored for the other operation, we
delete the previous registration and re-register it for both reading
and writing.
"""
if source in primary:
return
flags = primaryFlag
if source in other:
self._source_remove(self._sources[source])
flags |= otherFlag
self._sources[source] = self.input_add(
source, flags, self._ioEventCallback)
primary.add(source)
def addReader(self, reader):
"""
Add a L{FileDescriptor} for monitoring of data available to read.
"""
self._add(reader, self._reads, self._writes,
self.INFLAGS, self.OUTFLAGS)
def addWriter(self, writer):
"""
Add a L{FileDescriptor} for monitoring ability to write data.
"""
self._add(writer, self._writes, self._reads,
self.OUTFLAGS, self.INFLAGS)
def getReaders(self):
"""
Retrieve the list of current L{FileDescriptor} monitored for reading.
"""
return list(self._reads)
def getWriters(self):
"""
Retrieve the list of current L{FileDescriptor} monitored for writing.
"""
return list(self._writes)
def removeAll(self):
"""
Remove monitoring for all registered L{FileDescriptor}s.
"""
return self._removeAll(self._reads, self._writes)
def _remove(self, source, primary, other, flags):
"""
Remove monitoring the given L{FileDescriptor} for either reading or
writing. If it's still monitored for the other operation, we
re-register the L{FileDescriptor} for only that operation.
"""
if source not in primary:
return
self._source_remove(self._sources[source])
primary.remove(source)
if source in other:
self._sources[source] = self.input_add(
source, flags, self._ioEventCallback)
else:
self._sources.pop(source)
def removeReader(self, reader):
"""
Stop monitoring the given L{FileDescriptor} for reading.
"""
self._remove(reader, self._reads, self._writes, self.OUTFLAGS)
def removeWriter(self, writer):
"""
Stop monitoring the given L{FileDescriptor} for writing.
"""
self._remove(writer, self._writes, self._reads, self.INFLAGS)
def iterate(self, delay=0):
"""
One iteration of the event loop, for trial's use.
This is not used for actual reactor runs.
"""
self.runUntilCurrent()
while self._pending():
self._iteration(0)
def crash(self):
"""
Crash the reactor.
"""
posixbase.PosixReactorBase.crash(self)
self._crash()
def stop(self):
"""
Stop the reactor.
"""
posixbase.PosixReactorBase.stop(self)
# The base implementation only sets a flag, to ensure shutting down is
# not reentrant. Unfortunately, this flag is not meaningful to the
# gobject event loop. We therefore call wakeUp() to ensure the event
# loop will call back into Twisted once this iteration is done. This
# will result in self.runUntilCurrent() being called, where the stop
# flag will trigger the actual shutdown process, eventually calling
# crash() which will do the actual gobject event loop shutdown.
self.wakeUp()
def run(self, installSignalHandlers=True):
"""
Run the reactor.
"""
self.callWhenRunning(self._reschedule)
self.startRunning(installSignalHandlers=installSignalHandlers)
if self._started:
self._run()
def callLater(self, *args, **kwargs):
"""
Schedule a C{DelayedCall}.
"""
result = posixbase.PosixReactorBase.callLater(self, *args, **kwargs)
# Make sure we'll get woken up at correct time to handle this new
# scheduled call:
self._reschedule()
return result
def _reschedule(self):
"""
Schedule a glib timeout for C{_simulate}.
"""
if self._simtag is not None:
self._source_remove(self._simtag)
self._simtag = None
timeout = self.timeout()
if timeout is not None:
self._simtag = self._timeout_add(
int(timeout * 1000), self._simulate,
priority=self._glib.PRIORITY_DEFAULT_IDLE)
def _simulate(self):
"""
Run timers, and then reschedule glib timeout for next scheduled event.
"""
self.runUntilCurrent()
self._reschedule()
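# A concrete reactor is typically a thin subclass that supplies the real
# modules; a minimal sketch (hypothetical class name, mirroring what
# gtk3reactor does):
#
#     from gi.repository import GLib, Gtk
#
#     class Gtk3Reactor(GlibReactorBase):
#         def __init__(self):
#             GlibReactorBase.__init__(self, GLib, Gtk, useGtk=True)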
class PortableGlibReactorBase(selectreactor.SelectReactor):
"""
Base class for GObject event loop reactors that works on Windows.
Sockets aren't supported by GObject's input_add on Win32.
"""
def __init__(self, glib_module, gtk_module, useGtk=False):
self._simtag = None
self._glib = glib_module
self._gtk = gtk_module
selectreactor.SelectReactor.__init__(self)
self._source_remove = self._glib.source_remove
self._timeout_add = self._glib.timeout_add
def _mainquit():
if self._gtk.main_level():
self._gtk.main_quit()
if useGtk:
self._crash = _mainquit
self._run = self._gtk.main
else:
self.loop = self._glib.MainLoop()
self._crash = lambda: self._glib.idle_add(self.loop.quit)
self._run = self.loop.run
def crash(self):
selectreactor.SelectReactor.crash(self)
self._crash()
def run(self, installSignalHandlers=True):
self.startRunning(installSignalHandlers=installSignalHandlers)
self._timeout_add(0, self.simulate)
if self._started:
self._run()
def simulate(self):
"""
Run simulation loops and reschedule callbacks.
"""
if self._simtag is not None:
self._source_remove(self._simtag)
self.iterate()
        # self.timeout() may return None when nothing is scheduled; check for
        # that before clamping, since min(None, 0.01) is not portable.
        timeout = self.timeout()
        if timeout is None:
            timeout = 0.01
        else:
            timeout = min(timeout, 0.01)
self._simtag = self._timeout_add(
int(timeout * 1000), self.simulate,
priority=self._glib.PRIORITY_DEFAULT_IDLE)
| mit |
konradxyz/dev_fileserver | plugins/riemann-controller/setup.py | 2 | 1329 | #########
# Copyright (c) 2013 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
setup(
name='cloudify-riemann-controller-plugin',
version='3.3a3',
author='Gigaspaces',
author_email='[email protected]',
packages=['riemann_controller',
'riemann_controller.resources'],
package_data={'riemann_controller.resources': [
'manager.config',
'deployment.config.template'
]},
license='LICENSE',
description='Plugin for creating riemann configuration'
' based on blueprint policies and starting '
' a riemann core with generated configuration',
install_requires=[
'cloudify-plugins-common==3.3a3',
'jinja2==2.7.2'
],
)
| apache-2.0 |
naveentata/coala-bears | tests/hypertext/BootLintBearTest.py | 24 | 2763 | from bears.hypertext.BootLintBear import BootLintBear
from coalib.testing.LocalBearTestHelper import verify_local_bear
good_file = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Test</title>
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/
html5shiv.min.js">
</script>
<script src="https://oss.maxcdn.com/respond/1.4.2/
respond.min.js">
</script>
<![endif]-->
<script src="../../lib/jquery.min.js"></script>
<link rel="stylesheet" href="../../lib/qunit.css">
<script src="../../lib/qunit.js"></script>
<script src="../../../dist/browser/bootlint.js"></script>
<script src="../generic-qunit.js"></script>
</head>
<body>
<button type="submit">Submit</button>
<button type="reset">Reset</button>
<button type="button">Button</button>
<div id="qunit"></div>
<ol id="bootlint"></ol>
</body>
</html>
"""
bad_file = """
<html lang="en">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Test</title>
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/
html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/
respond.min.js"></script>
<![endif]-->
<script src="../../lib/jquery.min.js"></script>
<link rel="stylesheet" href="../../lib/qunit.css">
<script src="../../lib/qunit.js"></script>
<script src="../../../dist/browser/bootlint.js"></script>
<script src="../generic-qunit.js"></script>
</head>
<body>
<button>No type set</button>
<div>
<div class="panel-body">
<p>Something</p>
</div>
</div>
<div id="qunit"></div>
<ol id="bootlint">
<li data-lint="Found one or more `<button>`s
missing a `type` attribute."></li>
</ol>
</body>
</html>
"""
# The bad file has a <button> missing its type attribute, a missing
# DOCTYPE, and a panel without a body.
BootLintBearTest = verify_local_bear(BootLintBear,
valid_files=(good_file,),
invalid_files=(bad_file,))
BootLintBearDisableTest = verify_local_bear(
BootLintBear,
valid_files=(good_file, bad_file),
invalid_files=(),
settings={'bootlint_ignore': 'W001,W007,E001,E023'})
| agpl-3.0 |
aaronsw/watchdog | vendor/rdflib-2.4.0/rdflib/sparql/bison/Expression.py | 4 | 2821 | from Util import ListRedirect
class ParsedConditionalAndExpressionList(ListRedirect):
"""
A list of ConditionalAndExpressions, joined by '||'
"""
pyBooleanOperator = ' or '
def __init__(self,conditionalAndExprList):
if isinstance(conditionalAndExprList,list):
self._list = conditionalAndExprList
else:
self._list = [conditionalAndExprList]
def __repr__(self):
return "<ConditionalExpressionList: %s>"%self._list
class ParsedRelationalExpressionList(ListRedirect):
"""
A list of RelationalExpressions, joined by '&&'s
"""
pyBooleanOperator = ' and '
def __init__(self,relationalExprList):
if isinstance(relationalExprList,list):
self._list = relationalExprList
else:
self._list = [relationalExprList]
def __repr__(self):
return "<RelationalExpressionList: %s>"%self._list
class ParsedPrefixedMultiplicativeExpressionList(ListRedirect):
"""
A ParsedMultiplicativeExpressionList lead by a '+' or '-'
"""
def __init__(self,prefix,mulExprList):
self.prefix = prefix
assert prefix != '-',"arithmetic '-' operator not supported"
if isinstance(mulExprList,list):
self._list = mulExprList
else:
self._list = [mulExprList]
def __repr__(self):
return "%s %s"%(self.prefix,self.reduce())
class ParsedMultiplicativeExpressionList(ListRedirect):
"""
A list of UnaryExpressions, joined by '/' or '*' s
"""
def __init__(self,unaryExprList):
if isinstance(unaryExprList,list):
self._list = unaryExprList
else:
self._list = [unaryExprList]
def __repr__(self):
return "<MultiplicativeExpressionList: %s>"%self.reduce()
class ParsedAdditiveExpressionList(ListRedirect):
"""
A list of MultiplicativeExpressions, joined by '+' or '-' s
"""
def __init__(self,multiplicativeExprList):
if isinstance(multiplicativeExprList,list):
self._list = multiplicativeExprList
else:
self._list = [multiplicativeExprList]
def __repr__(self):
return "<AdditiveExpressionList: %s>"%self._list
class ParsedString(unicode):
    # unicode is immutable, so the default for a None value must be applied in
    # __new__; an __init__ override runs too late to change the stored text
    # (ParsedString(None) would otherwise become u'None').
    def __new__(cls, value=None):
        return super(ParsedString, cls).__new__(cls, u"" if value is None else value)
class ParsedDatatypedLiteral(object):
"""
    Placeholder for datatyped literals.
    This is necessary (instead of instantiating Literals directly)
    when datatype IRIRefs are QNames (in which case the prefix needs to be resolved at some point).
"""
def __init__(self,value,dType):
self.value = value
self.dataType = dType
def __repr__(self):
return "'%s'^^%s"%(self.value,self.dataType) | agpl-3.0 |
languagetool-org/languagetool | languagetool-language-modules/sr/src/main/resources/org/languagetool/resource/sr/script/gettags.py | 4 | 3785 | #!/usr/bin/env python3
# coding: utf-8
"""
Program reads input file line by line, matching PoS tags. They are written
to the output file in order of appearance. Each tag is written to output
file only once.
"""
import argparse
import logging
import re
import os
import sys
_args_ = None
_logger_ = None
_out_file_ = None
LOG_FORMAT = '%(asctime)-15s %(levelname)s %(message)s'
# Types of regex to match input, selectable from command line
REGEX_TYPE = {
    "lex" : "^([!\"\'\(\),\-\.:;\?]|[a-zčćžšđâîôﬁǐüA-ZČĆŽŠĐ0-9_\-]+)\s+([!\"\'\(\),\-\.:;\?]|[a-zčćžšđâîôﬁǐüA-ZČĆŽŠĐ0-9_\-]+)\s+([a-zA-Z0-9\-]+)*",
    "wac" : "^([a-zčćžšđâîôﬁǐüA-ZČĆŽŠĐ0-9_\-]+)\s+([!\"\'\(\),\-\.:;\?]|[a-zčćžšđâîôﬁǐüA-ZČĆŽŠĐ0-9_\-]+)\s+([!\"\'\(\),\-\.:;\?]|[a-zčćžšđâîôﬁǐüA-ZČĆŽŠĐ0-9_\-]+)\s+([a-zA-Z0-9\-]+)*"
}
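# The "lex" pattern captures three groups per line and the "wac" pattern
# four; in both cases the final group is the PoS tag, which is what
# parse_file() extracts below.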
def parse_args():
parser = argparse.ArgumentParser(description='Processes file containing Serbian word corpus.')
parser.add_argument('-b', '--base-dir', default='/tmp')
parser.add_argument('-d', '--debug', action ='store_true', default=False)
parser.add_argument('-i', '--input-file', default=None)
parser.add_argument('-n', '--first-n-lines', default=0, type=int)
parser.add_argument('-o', '--output-file', default='out.txt')
parser.add_argument('-r', '--regex', default=None)
global _args_, _logger_
_args_ = parser.parse_args()
if _args_.debug:
_logger_.setLevel( logging.DEBUG )
else:
_logger_.setLevel( logging.INFO )
_logger_.debug( "Command-line arguments: {}".format(_args_) )
if not _args_.input_file:
_logger_.error("Input file was not specified, aborting ...")
sys.exit(1)
    if not _args_.regex:
        _logger_.error("Regex type was not specified, aborting ...")
        sys.exit(1)
if not os.path.exists(_args_.input_file):
_logger_.error("Unable to open file '{}', aborting ...".format(_args_.input_file))
sys.exit(1)
def init():
global _logger_
logging.basicConfig(format=LOG_FORMAT)
_logger_ = logging.getLogger("lex2lt")
def open_out_file():
global _out_file_
_out_file_ = open(_args_.output_file, "w")
def close_out_file():
_out_file_.close()
# Parse input file
def parse_file():
cnt = 0
matchcnt = 0
tags = []
if _args_.regex in REGEX_TYPE:
pattern = re.compile(REGEX_TYPE[ _args_.regex ])
else:
_logger_.error("Regular expression of type '{}' does not exist in configuration, aborting ...".format(_args_.regex))
sys.exit(1)
_logger_.info("Started processing input file '{}' ...".format(_args_.input_file))
with open(_args_.input_file) as f:
for line in f:
# Remove end of line
line = line.strip()
cnt += 1
# Check if line matches regex
match = pattern.match(line)
if match:
matchcnt += 1
_logger_.debug("Matched groups: {}".format(match.groups()))
                if len(match.groups()) < 4:
                    posgr = match.group(3)
                else:
                    # "wac" lines: the tag is the fourth captured group
                    posgr = match.group(4)
_logger_.debug('posgr={}'.format(posgr))
if posgr not in tags:
tags.append( posgr )
_out_file_.write("{}\n".format(posgr))
else:
_logger_.warn("Unmatched line: {}".format(line))
if cnt > _args_.first_n_lines > 0:
break
f.close()
_logger_.info("Finished processing input file '{}': total {} lines, {} matching lines.".format(_args_.input_file, cnt, matchcnt))
if __name__ == "__main__":
init()
parse_args()
open_out_file()
parse_file()
close_out_file()
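# Example invocation (hypothetical file names):
#     python3 gettags.py --input-file corpus.txt --regex lex --output-file tags.txt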
| lgpl-2.1 |
benoitsteiner/tensorflow-xsmm | tensorflow/contrib/kfac/examples/convnet_mnist_distributed_main.py | 15 | 2254 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Train a ConvNet on MNIST using K-FAC.
Distributed training with sync replicas optimizer. See
`convnet.train_mnist_distributed_sync_replicas` for details.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from tensorflow.contrib.kfac.examples import convnet
FLAGS = flags.FLAGS
flags.DEFINE_integer("task", -1, "Task identifier")
flags.DEFINE_string("data_dir", "/tmp/mnist", "local mnist dir")
flags.DEFINE_string(
"cov_inv_op_strategy", "chief_worker",
"In dist training mode run the cov, inv ops on chief or dedicated workers."
)
flags.DEFINE_string("master", "local", "Session master.")
flags.DEFINE_integer("ps_tasks", 2,
"Number of tasks in the parameter server job.")
flags.DEFINE_integer("replicas_to_aggregate", 5,
"Number of replicas to aggregate.")
flags.DEFINE_integer("worker_replicas", 5, "Number of replicas in worker job.")
flags.DEFINE_integer("num_epochs", None, "Number of epochs.")
def _is_chief():
"""Determines whether a job is the chief worker."""
if "chief_worker" in FLAGS.brain_jobs:
return FLAGS.brain_job_name == "chief_worker"
else:
return FLAGS.task == 0
def main(unused_argv):
_ = unused_argv
convnet.train_mnist_distributed_sync_replicas(
FLAGS.task, _is_chief(), FLAGS.worker_replicas, FLAGS.ps_tasks,
FLAGS.master, FLAGS.data_dir, FLAGS.num_epochs, FLAGS.cov_inv_op_strategy)
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
alu042/edx-platform | openedx/core/djangoapps/profile_images/tests/test_views.py | 29 | 21972 | """
Test cases for the HTTP endpoints of the profile image api.
"""
from contextlib import closing
import datetime
from pytz import UTC
import unittest
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponse
import ddt
import mock
from mock import patch
from PIL import Image
from rest_framework.test import APITestCase, APIClient
from student.tests.factories import UserFactory
from student.tests.tests import UserSettingsEventTestMixin
from openedx.core.djangoapps.user_api.accounts.image_helpers import (
set_has_profile_image,
get_profile_image_names,
get_profile_image_storage,
)
from ..images import create_profile_images, ImageValidationError
from ..views import LOG_MESSAGE_CREATE, LOG_MESSAGE_DELETE
from .helpers import make_image_file
TEST_PASSWORD = "test"
TEST_UPLOAD_DT = datetime.datetime(2002, 1, 9, 15, 43, 01, tzinfo=UTC)
TEST_UPLOAD_DT2 = datetime.datetime(2003, 1, 9, 15, 43, 01, tzinfo=UTC)
class PatchedClient(APIClient):
"""
Patch DRF's APIClient to avoid a unicode error on file upload.
Famous last words: This is a *temporary* fix that we should be
able to remove once we upgrade Django past 1.4.
"""
def request(self, *args, **kwargs):
"""Construct an API request. """
# DRF's default test client implementation uses `six.text_type()`
# to convert the CONTENT_TYPE to `unicode`. In Django 1.4,
# this causes a `UnicodeDecodeError` when Django parses a multipart
# upload.
#
# This is the DRF code we're working around:
# https://github.com/tomchristie/django-rest-framework/blob/3.1.3/rest_framework/compat.py#L227
#
# ... and this is the Django code that raises the exception:
#
# https://github.com/django/django/blob/1.4.22/django/http/multipartparser.py#L435
#
# Django unhelpfully swallows the exception, so to the application code
# it appears as though the user didn't send any file data.
#
# This appears to be an issue only with requests constructed in the test
# suite, not with the upload code used in production.
#
if isinstance(kwargs.get("CONTENT_TYPE"), basestring):
kwargs["CONTENT_TYPE"] = str(kwargs["CONTENT_TYPE"])
return super(PatchedClient, self).request(*args, **kwargs)
class ProfileImageEndpointMixin(UserSettingsEventTestMixin):
"""
Base class / shared infrastructure for tests of profile_image "upload" and
"remove" endpoints.
"""
# subclasses should override this with the name of the view under test, as
# per the urls.py configuration.
_view_name = None
client_class = PatchedClient
def setUp(self):
super(ProfileImageEndpointMixin, self).setUp()
self.user = UserFactory.create(password=TEST_PASSWORD)
# Ensure that parental controls don't apply to this user
self.user.profile.year_of_birth = 1980
self.user.profile.save()
self.url = reverse(self._view_name, kwargs={'username': self.user.username})
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.storage = get_profile_image_storage()
self.table = 'auth_userprofile'
# this assertion is made here as a sanity check because all tests
# assume user.profile.has_profile_image is False by default
self.assertFalse(self.user.profile.has_profile_image)
# Reset the mock event tracker so that we're not considering the
# initial profile creation events.
self.reset_tracker()
def tearDown(self):
super(ProfileImageEndpointMixin, self).tearDown()
for name in get_profile_image_names(self.user.username).values():
self.storage.delete(name)
def check_images(self, exist=True):
"""
If exist is True, make sure the images physically exist in storage
with correct sizes and formats.
If exist is False, make sure none of the images exist.
"""
for size, name in get_profile_image_names(self.user.username).items():
if exist:
self.assertTrue(self.storage.exists(name))
with closing(Image.open(self.storage.path(name))) as img:
self.assertEqual(img.size, (size, size))
self.assertEqual(img.format, 'JPEG')
else:
self.assertFalse(self.storage.exists(name))
def check_response(self, response, expected_code, expected_developer_message=None, expected_user_message=None):
"""
Make sure the response has the expected code, and if that isn't 204,
optionally check the correctness of a developer-facing message.
"""
self.assertEqual(expected_code, response.status_code)
if expected_code == 204:
self.assertIsNone(response.data)
else:
if expected_developer_message is not None:
self.assertEqual(response.data.get('developer_message'), expected_developer_message)
if expected_user_message is not None:
self.assertEqual(response.data.get('user_message'), expected_user_message)
def check_has_profile_image(self, has_profile_image=True):
"""
Make sure the value of self.user.profile.has_profile_image is what we
expect.
"""
# it's necessary to reload this model from the database since save()
# would have been called on another instance.
profile = self.user.profile.__class__.objects.get(user=self.user)
self.assertEqual(profile.has_profile_image, has_profile_image)
def check_anonymous_request_rejected(self, method):
"""
Make sure that the specified method rejects access by unauthorized users.
"""
anonymous_client = APIClient()
request_method = getattr(anonymous_client, method)
response = request_method(self.url)
self.check_response(response, 401)
self.assert_no_events_were_emitted()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class ProfileImageViewGeneralTestCase(ProfileImageEndpointMixin, APITestCase):
"""
Tests for the profile image endpoint
"""
_view_name = "accounts_profile_image_api"
def test_unsupported_methods(self, mock_log):
"""
Test that GET, PUT, and PATCH are not supported.
"""
self.assertEqual(405, self.client.get(self.url).status_code)
self.assertEqual(405, self.client.put(self.url).status_code)
self.assertEqual(405, self.client.patch(self.url).status_code)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class ProfileImageViewPostTestCase(ProfileImageEndpointMixin, APITestCase):
"""
Tests for the POST method of the profile_image api endpoint.
"""
_view_name = "accounts_profile_image_api"
# Use the patched version of the API client to workaround a unicode issue
# with DRF 3.1 and Django 1.4. Remove this after we upgrade Django past 1.4!
def check_upload_event_emitted(self, old=None, new=TEST_UPLOAD_DT):
"""
Make sure we emit a UserProfile event corresponding to the
profile_image_uploaded_at field changing.
"""
self.assert_user_setting_event_emitted(
setting='profile_image_uploaded_at', old=old, new=new
)
def test_anonymous_access(self, mock_log):
"""
Test that an anonymous client (not logged in) cannot call POST.
"""
self.check_anonymous_request_rejected('post')
self.assertFalse(mock_log.info.called)
@ddt.data('.jpg', '.jpeg', '.jpg', '.jpeg', '.png', '.gif', '.GIF')
@patch(
'openedx.core.djangoapps.profile_images.views._make_upload_dt',
side_effect=[TEST_UPLOAD_DT, TEST_UPLOAD_DT2],
)
def test_upload_self(self, extension, _mock_make_image_version, mock_log):
"""
Test that an authenticated user can POST to their own upload endpoint.
"""
with make_image_file(extension=extension) as image_file:
response = self.client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(response, 204)
self.check_images()
self.check_has_profile_image()
mock_log.info.assert_called_once_with(
LOG_MESSAGE_CREATE,
{'image_names': get_profile_image_names(self.user.username).values(), 'user_id': self.user.id}
)
self.check_upload_event_emitted()
# Try another upload and make sure that a second event is emitted.
with make_image_file() as image_file:
response = self.client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(response, 204)
self.check_upload_event_emitted(old=TEST_UPLOAD_DT, new=TEST_UPLOAD_DT2)
@ddt.data(
('image/jpeg', '.jpg'),
('image/jpeg', '.jpeg'),
('image/pjpeg', '.jpg'),
('image/pjpeg', '.jpeg'),
('image/png', '.png'),
('image/gif', '.gif'),
('image/gif', '.GIF'),
)
@ddt.unpack
@patch('openedx.core.djangoapps.profile_images.views._make_upload_dt', return_value=TEST_UPLOAD_DT)
def test_upload_by_mimetype(self, content_type, extension, _mock_make_image_version, mock_log):
"""
Test that a user can upload raw content with the appropriate mimetype
"""
with make_image_file(extension=extension) as image_file:
data = image_file.read()
response = self.client.post(
self.url,
data,
content_type=content_type,
HTTP_CONTENT_DISPOSITION='attachment;filename=filename{}'.format(extension),
)
self.check_response(response, 204)
self.check_images()
self.check_has_profile_image()
mock_log.info.assert_called_once_with(
LOG_MESSAGE_CREATE,
{'image_names': get_profile_image_names(self.user.username).values(), 'user_id': self.user.id}
)
self.check_upload_event_emitted()
def test_upload_unsupported_mimetype(self, mock_log):
"""
Test that uploading an unsupported image as raw content fails with an
HTTP 415 Error.
"""
with make_image_file() as image_file:
data = image_file.read()
response = self.client.post(
self.url,
data,
content_type='image/tiff',
HTTP_CONTENT_DISPOSITION='attachment;filename=filename.tiff',
)
self.check_response(response, 415)
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_upload_other(self, mock_log):
"""
Test that an authenticated user cannot POST to another user's upload
endpoint.
"""
different_user = UserFactory.create(password=TEST_PASSWORD)
# Ignore UserProfileFactory creation events.
self.reset_tracker()
different_client = APIClient()
different_client.login(username=different_user.username, password=TEST_PASSWORD)
with make_image_file() as image_file:
response = different_client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(response, 404)
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_upload_staff(self, mock_log):
"""
Test that an authenticated staff cannot POST to another user's upload
endpoint.
"""
staff_user = UserFactory(is_staff=True, password=TEST_PASSWORD)
# Ignore UserProfileFactory creation events.
self.reset_tracker()
staff_client = APIClient()
staff_client.login(username=staff_user.username, password=TEST_PASSWORD)
with make_image_file() as image_file:
response = staff_client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(response, 403)
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_upload_missing_file(self, mock_log):
"""
Test that omitting the file entirely from the POST results in HTTP 400.
"""
response = self.client.post(self.url, {}, format='multipart')
self.check_response(
response, 400,
expected_developer_message=u"No file provided for profile image",
expected_user_message=u"No file provided for profile image",
)
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_upload_not_a_file(self, mock_log):
"""
Test that sending unexpected data that isn't a file results in HTTP
400.
"""
response = self.client.post(self.url, {'file': 'not a file'}, format='multipart')
self.check_response(
response, 400,
expected_developer_message=u"No file provided for profile image",
expected_user_message=u"No file provided for profile image",
)
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_upload_validation(self, mock_log):
"""
Test that when upload validation fails, the proper HTTP response and
messages are returned.
"""
with make_image_file() as image_file:
with mock.patch(
'openedx.core.djangoapps.profile_images.views.validate_uploaded_image',
side_effect=ImageValidationError(u"test error message")
):
response = self.client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(
response, 400,
expected_developer_message=u"test error message",
expected_user_message=u"test error message",
)
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
@patch('PIL.Image.open')
def test_upload_failure(self, image_open, mock_log):
"""
Test that when upload validation fails, the proper HTTP response and
messages are returned.
"""
image_open.side_effect = [Exception(u"whoops"), None]
with make_image_file() as image_file:
with self.assertRaises(Exception):
self.client.post(self.url, {'file': image_file}, format='multipart')
self.check_images(False)
self.check_has_profile_image(False)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class ProfileImageViewDeleteTestCase(ProfileImageEndpointMixin, APITestCase):
"""
Tests for the DELETE method of the profile_image endpoint.
"""
_view_name = "accounts_profile_image_api"
def setUp(self):
super(ProfileImageViewDeleteTestCase, self).setUp()
with make_image_file() as image_file:
create_profile_images(image_file, get_profile_image_names(self.user.username))
self.check_images()
set_has_profile_image(self.user.username, True, TEST_UPLOAD_DT)
# Ignore previous event
self.reset_tracker()
def check_remove_event_emitted(self):
"""
Make sure we emit a UserProfile event corresponding to the
profile_image_uploaded_at field changing.
"""
self.assert_user_setting_event_emitted(
setting='profile_image_uploaded_at', old=TEST_UPLOAD_DT, new=None
)
def test_anonymous_access(self, mock_log):
"""
Test that an anonymous client (not logged in) cannot call DELETE.
"""
self.check_anonymous_request_rejected('delete')
self.assertFalse(mock_log.info.called)
def test_remove_self(self, mock_log):
"""
Test that an authenticated user can DELETE to remove their own profile
images.
"""
response = self.client.delete(self.url)
self.check_response(response, 204)
self.check_images(False)
self.check_has_profile_image(False)
mock_log.info.assert_called_once_with(
LOG_MESSAGE_DELETE,
{'image_names': get_profile_image_names(self.user.username).values(), 'user_id': self.user.id}
)
self.check_remove_event_emitted()
def test_remove_other(self, mock_log):
"""
Test that an authenticated user cannot DELETE to remove another user's
profile images.
"""
different_user = UserFactory.create(password=TEST_PASSWORD)
# Ignore UserProfileFactory creation events.
self.reset_tracker()
different_client = APIClient()
different_client.login(username=different_user.username, password=TEST_PASSWORD)
response = different_client.delete(self.url)
self.check_response(response, 404)
self.check_images(True) # thumbnails should remain intact.
self.check_has_profile_image(True)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_remove_staff(self, mock_log):
"""
Test that an authenticated staff user can DELETE to remove another user's
profile images.
"""
staff_user = UserFactory(is_staff=True, password=TEST_PASSWORD)
staff_client = APIClient()
staff_client.login(username=staff_user.username, password=TEST_PASSWORD)
response = self.client.delete(self.url)
self.check_response(response, 204)
self.check_images(False)
self.check_has_profile_image(False)
mock_log.info.assert_called_once_with(
LOG_MESSAGE_DELETE,
{'image_names': get_profile_image_names(self.user.username).values(), 'user_id': self.user.id}
)
self.check_remove_event_emitted()
@patch('student.models.UserProfile.save')
def test_remove_failure(self, user_profile_save, mock_log):
"""
Test that when remove validation fails, the proper HTTP response and
messages are returned.
"""
user_profile_save.side_effect = [Exception(u"whoops"), None]
with self.assertRaises(Exception):
self.client.delete(self.url)
self.check_images(True) # thumbnails should remain intact.
self.check_has_profile_image(True)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
class DeprecatedProfileImageTestMixin(ProfileImageEndpointMixin):
"""
Actual tests for DeprecatedProfileImage.*TestCase classes defined here.
Requires:
self._view_name
self._replacement_method
"""
def test_unsupported_methods(self, mock_log):
"""
Test that GET, PUT, PATCH, and DELETE are not supported.
"""
self.assertEqual(405, self.client.get(self.url).status_code)
self.assertEqual(405, self.client.put(self.url).status_code)
self.assertEqual(405, self.client.patch(self.url).status_code)
self.assertEqual(405, self.client.delete(self.url).status_code)
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
def test_post_calls_replacement_view_method(self, mock_log):
"""
Test that calls to this view pass through the the new view.
"""
with patch(self._replacement_method) as mock_method:
mock_method.return_value = HttpResponse()
self.client.post(self.url)
assert mock_method.called
self.assertFalse(mock_log.info.called)
self.assert_no_events_were_emitted()
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class DeprecatedProfileImageUploadTestCase(DeprecatedProfileImageTestMixin, APITestCase):
"""
Tests for the deprecated profile_image upload endpoint.
Actual tests defined on DeprecatedProfileImageTestMixin
"""
_view_name = 'profile_image_upload'
_replacement_method = 'openedx.core.djangoapps.profile_images.views.ProfileImageView.post'
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Profile Image API is only supported in LMS')
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class DeprecatedProfileImageRemoveTestCase(DeprecatedProfileImageTestMixin, APITestCase):
"""
Tests for the deprecated profile_image remove endpoint.
Actual tests defined on DeprecatedProfileImageTestMixin
"""
_view_name = "profile_image_remove"
_replacement_method = 'openedx.core.djangoapps.profile_images.views.ProfileImageView.delete'
| agpl-3.0 |
j-carl/ansible | lib/ansible/plugins/inventory/ini.py | 2 | 17394 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
inventory: ini
version_added: "2.4"
short_description: Uses an Ansible INI file as inventory source.
description:
- INI file based inventory, sections are groups or group related with special `:modifiers`.
- Entries in sections C([group_1]) are hosts, members of the group.
- Hosts can have variables defined inline as key/value pairs separated by C(=).
- The C(children) modifier indicates that the section contains groups.
- The C(vars) modifier indicates that the section contains variables assigned to members of the group.
- Anything found outside a section is considered an 'ungrouped' host.
- Values passed in the INI format using the ``key=value`` syntax are interpreted differently depending on where they are declared within your inventory.
- When declared inline with the host, INI values are processed by Python's ast.literal_eval function
(U(https://docs.python.org/2/library/ast.html#ast.literal_eval)) and interpreted as Python literal structures
(strings, numbers, tuples, lists, dicts, booleans, None). Host lines accept multiple C(key=value) parameters per line.
Therefore they need a way to indicate that a space is part of a value rather than a separator.
- When declared in a C(:vars) section, INI values are interpreted as strings. For example C(var=FALSE) would create a string equal to C(FALSE).
Unlike host lines, C(:vars) sections accept only a single entry per line, so everything after the C(=) must be the value for the entry.
- Do not rely on types set during definition, always make sure you specify type with a filter when needed when consuming the variable.
- See the Examples for proper quoting to prevent changes to variable type.
notes:
- Whitelisted in configuration by default.
- Consider switching to YAML format for inventory sources to avoid confusion on the actual type of a variable.
The YAML inventory plugin processes variable values consistently and correctly.
'''
EXAMPLES = '''# fmt: ini
# Example 1
[web]
host1
host2 ansible_port=222 # defined inline, interpreted as an integer
[web:vars]
http_port=8080 # all members of 'web' will inherit these
myvar=23 # defined in a :vars section, interpreted as a string
[web:children] # child groups will automatically add their hosts to parent group
apache
nginx
[apache]
tomcat1
tomcat2 myvar=34 # host specific vars override group vars
tomcat3 mysecret="'03#pa33w0rd'" # proper quoting to prevent value changes
[nginx]
jenkins1
[nginx:vars]
has_java = True # vars in child groups override same in parent
[all:vars]
has_java = False # 'all' is 'top' parent
# Example 2
host1 # this is 'ungrouped'
# both hosts have same IP but diff ports, also 'ungrouped'
host2 ansible_host=127.0.0.1 ansible_port=44
host3 ansible_host=127.0.0.1 ansible_port=45
[g1]
host4
[g2]
host4 # same host as above, but member of 2 groups, will inherit vars from both
# inventory hostnames are unique
'''
import ast
import re
from ansible.inventory.group import to_safe_group_name
from ansible.plugins.inventory import BaseFileInventoryPlugin
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_bytes, to_text
from ansible.utils.shlex import shlex_split
class InventoryModule(BaseFileInventoryPlugin):
"""
Takes an INI-format inventory file and builds a list of groups and subgroups
with their associated hosts and variable settings.
"""
NAME = 'ini'
_COMMENT_MARKERS = frozenset((u';', u'#'))
b_COMMENT_MARKERS = frozenset((b';', b'#'))
def __init__(self):
super(InventoryModule, self).__init__()
self.patterns = {}
self._filename = None
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
self._filename = path
try:
# Read in the hosts, groups, and variables defined in the inventory file.
if self.loader:
(b_data, private) = self.loader._get_file_contents(path)
else:
b_path = to_bytes(path, errors='surrogate_or_strict')
with open(b_path, 'rb') as fh:
b_data = fh.read()
try:
# Faster to do to_text once on a long string than many
# times on smaller strings
data = to_text(b_data, errors='surrogate_or_strict').splitlines()
except UnicodeError:
# Handle non-utf8 in comment lines: https://github.com/ansible/ansible/issues/17593
data = []
for line in b_data.splitlines():
if line and line[0] in self.b_COMMENT_MARKERS:
# Replace is okay for comment lines
# data.append(to_text(line, errors='surrogate_then_replace'))
# Currently we only need these lines for accurate lineno in errors
data.append(u'')
else:
                        # Non-comment lines still have to be valid utf-8
data.append(to_text(line, errors='surrogate_or_strict'))
self._parse(path, data)
except Exception as e:
raise AnsibleParserError(e)
def _raise_error(self, message):
raise AnsibleError("%s:%d: " % (self._filename, self.lineno) + message)
def _parse(self, path, lines):
'''
Populates self.groups from the given array of lines. Raises an error on
any parse failure.
'''
self._compile_patterns()
# We behave as though the first line of the inventory is '[ungrouped]',
# and begin to look for host definitions. We make a single pass through
# each line of the inventory, building up self.groups and adding hosts,
# subgroups, and setting variables as we go.
pending_declarations = {}
groupname = 'ungrouped'
state = 'hosts'
self.lineno = 0
for line in lines:
self.lineno += 1
line = line.strip()
# Skip empty lines and comments
if not line or line[0] in self._COMMENT_MARKERS:
continue
# Is this a [section] header? That tells us what group we're parsing
# definitions for, and what kind of definitions to expect.
m = self.patterns['section'].match(line)
if m:
(groupname, state) = m.groups()
groupname = to_safe_group_name(groupname)
state = state or 'hosts'
if state not in ['hosts', 'children', 'vars']:
title = ":".join(m.groups())
self._raise_error("Section [%s] has unknown type: %s" % (title, state))
# If we haven't seen this group before, we add a new Group.
if groupname not in self.inventory.groups:
# Either [groupname] or [groupname:children] is sufficient to declare a group,
                    # but [groupname:vars] is allowed only if the group is declared elsewhere.
# We add the group anyway, but make a note in pending_declarations to check at the end.
#
# It's possible that a group is previously pending due to being defined as a child
# group, in that case we simply pass so that the logic below to process pending
# declarations will take the appropriate action for a pending child group instead of
# incorrectly handling it as a var state pending declaration
if state == 'vars' and groupname not in pending_declarations:
pending_declarations[groupname] = dict(line=self.lineno, state=state, name=groupname)
self.inventory.add_group(groupname)
# When we see a declaration that we've been waiting for, we process and delete.
if groupname in pending_declarations and state != 'vars':
if pending_declarations[groupname]['state'] == 'children':
self._add_pending_children(groupname, pending_declarations)
elif pending_declarations[groupname]['state'] == 'vars':
del pending_declarations[groupname]
continue
elif line.startswith('[') and line.endswith(']'):
self._raise_error("Invalid section entry: '%s'. Please make sure that there are no spaces" % line + " " +
"in the section entry, and that there are no other invalid characters")
# It's not a section, so the current state tells us what kind of
# definition it must be. The individual parsers will raise an
# error if we feed them something they can't digest.
# [groupname] contains host definitions that must be added to
# the current group.
if state == 'hosts':
hosts, port, variables = self._parse_host_definition(line)
self._populate_host_vars(hosts, variables, groupname, port)
# [groupname:vars] contains variable definitions that must be
# applied to the current group.
elif state == 'vars':
(k, v) = self._parse_variable_definition(line)
self.inventory.set_variable(groupname, k, v)
# [groupname:children] contains subgroup names that must be
# added as children of the current group. The subgroup names
# must themselves be declared as groups, but as before, they
# may only be declared later.
elif state == 'children':
child = self._parse_group_name(line)
if child not in self.inventory.groups:
if child not in pending_declarations:
pending_declarations[child] = dict(line=self.lineno, state=state, name=child, parents=[groupname])
else:
pending_declarations[child]['parents'].append(groupname)
else:
self.inventory.add_child(groupname, child)
else:
# This can happen only if the state checker accepts a state that isn't handled above.
self._raise_error("Entered unhandled state: %s" % (state))
# Any entries in pending_declarations not removed by a group declaration above mean that there was an unresolved reference.
# We report only the first such error here.
for g in pending_declarations:
decl = pending_declarations[g]
if decl['state'] == 'vars':
raise AnsibleError("%s:%d: Section [%s:vars] not valid for undefined group: %s" % (path, decl['line'], decl['name'], decl['name']))
elif decl['state'] == 'children':
raise AnsibleError("%s:%d: Section [%s:children] includes undefined group: %s" % (path, decl['line'], decl['parents'].pop(), decl['name']))
def _add_pending_children(self, group, pending):
for parent in pending[group]['parents']:
self.inventory.add_child(parent, group)
if parent in pending and pending[parent]['state'] == 'children':
self._add_pending_children(parent, pending)
del pending[group]
def _parse_group_name(self, line):
'''
Takes a single line and tries to parse it as a group name. Returns the
group name if successful, or raises an error.
'''
m = self.patterns['groupname'].match(line)
if m:
return m.group(1)
self._raise_error("Expected group name, got: %s" % (line))
def _parse_variable_definition(self, line):
'''
Takes a string and tries to parse it as a variable definition. Returns
the key and value if successful, or raises an error.
'''
# TODO: We parse variable assignments as a key (anything to the left of
        # an '='), an '=', and a value (anything left) and leave the value to
# _parse_value to sort out. We should be more systematic here about
# defining what is acceptable, how quotes work, and so on.
if '=' in line:
(k, v) = [e.strip() for e in line.split("=", 1)]
return (k, self._parse_value(v))
self._raise_error("Expected key=value, got: %s" % (line))
def _parse_host_definition(self, line):
'''
Takes a single line and tries to parse it as a host definition. Returns
a list of Hosts if successful, or raises an error.
'''
# A host definition comprises (1) a non-whitespace hostname or range,
# optionally followed by (2) a series of key="some value" assignments.
# We ignore any trailing whitespace and/or comments. For example, here
# are a series of host definitions in a group:
#
# [groupname]
# alpha
# beta:2345 user=admin # we'll tell shlex
# gamma sudo=True user=root # to ignore comments
try:
tokens = shlex_split(line, comments=True)
except ValueError as e:
self._raise_error("Error parsing host definition '%s': %s" % (line, e))
(hostnames, port) = self._expand_hostpattern(tokens[0])
# Try to process anything remaining as a series of key=value pairs.
variables = {}
for t in tokens[1:]:
if '=' not in t:
self._raise_error("Expected key=value host variable assignment, got: %s" % (t))
(k, v) = t.split('=', 1)
variables[k] = self._parse_value(v)
return hostnames, port, variables
def _expand_hostpattern(self, hostpattern):
'''
do some extra checks over normal processing
'''
# specification?
hostnames, port = super(InventoryModule, self)._expand_hostpattern(hostpattern)
if hostpattern.strip().endswith(':') and port is None:
raise AnsibleParserError("Invalid host pattern '%s' supplied, ending in ':' is not allowed, this character is reserved to provide a port." %
hostpattern)
for pattern in hostnames:
# some YAML parsing prevention checks
if pattern.strip() == '---':
raise AnsibleParserError("Invalid host pattern '%s' supplied, '---' is normally a sign this is a YAML file." % hostpattern)
return (hostnames, port)
@staticmethod
def _parse_value(v):
'''
Attempt to transform the string value from an ini file into a basic python object
(int, dict, list, unicode string, etc).
'''
try:
v = ast.literal_eval(v)
# Using explicit exceptions.
            # Likely a string that literal_eval does not like. We will then just set it.
except ValueError:
# For some reason this was thought to be malformed.
pass
except SyntaxError:
# Is this a hash with an equals at the end?
pass
return to_text(v, nonstring='passthru', errors='surrogate_or_strict')
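    # Illustrative results (standard ast.literal_eval behaviour):
    #   _parse_value('123')     -> 123 (an int)
    #   _parse_value('[1, 2]')  -> [1, 2] (a list)
    #   _parse_value('FALSE')   -> u'FALSE' (stays a string, not a boolean)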
def _compile_patterns(self):
'''
Compiles the regular expressions required to parse the inventory and
stores them in self.patterns.
'''
# Section names are square-bracketed expressions at the beginning of a
# line, comprising (1) a group name optionally followed by (2) a tag
# that specifies the contents of the section. We ignore any trailing
# whitespace and/or comments. For example:
#
# [groupname]
# [somegroup:vars]
# [naughty:children] # only get coal in their stockings
self.patterns['section'] = re.compile(
to_text(r'''^\[
([^:\]\s]+) # group name (see groupname below)
(?::(\w+))? # optional : and tag name
\]
\s* # ignore trailing whitespace
(?:\#.*)? # and/or a comment till the
$ # end of the line
''', errors='surrogate_or_strict'), re.X
)
# FIXME: What are the real restrictions on group names, or rather, what
# should they be? At the moment, they must be non-empty sequences of non
# whitespace characters excluding ':' and ']', but we should define more
# precise rules in order to support better diagnostics.
self.patterns['groupname'] = re.compile(
to_text(r'''^
([^:\]\s]+)
\s* # ignore trailing whitespace
(?:\#.*)? # and/or a comment till the
$ # end of the line
''', errors='surrogate_or_strict'), re.X
)
| gpl-3.0 |
CSD-Public/stonix | src/tests/rules/unit_tests/zzzTestRuleDisableOpenSafeSafari.py | 1 | 4752 | ###############################################################################
# #
# Copyright 2019. Triad National Security, LLC. All rights reserved. #
# This program was produced under U.S. Government contract 89233218CNA000001 #
# for Los Alamos National Laboratory (LANL), which is operated by Triad #
# National Security, LLC for the U.S. Department of Energy/National Nuclear #
# Security Administration. #
# #
# All rights in the program are reserved by Triad National Security, LLC, and #
# the U.S. Department of Energy/National Nuclear Security Administration. The #
# Government is granted for itself and others acting on its behalf a #
# nonexclusive, paid-up, irrevocable worldwide license in this material to #
# reproduce, prepare derivative works, distribute copies to the public, #
# perform publicly and display publicly, and to permit others to do so. #
# #
###############################################################################
'''
This is a Unit Test for Rule DisableOpenSafeSafari
Created on Jan 22, 2015
@author: dwalker
@change: 2015-02-25 - ekkehard - Updated to make unit test work
@change: 2016/02/10 roy Added sys.path.append for being able to unit test this
file as well as with the test harness.
'''
import unittest
import sys
sys.path.append("../../../..")
from src.tests.lib.RuleTestTemplate import RuleTest
from src.stonix_resources.CommandHelper import CommandHelper
from src.tests.lib.logdispatcher_mock import LogPriority
from src.stonix_resources.rules.DisableOpenSafeSafari import DisableOpenSafeSafari
class zzzTestRuleDisableOpenSafeSafari(RuleTest):
def setUp(self):
RuleTest.setUp(self)
self.rule = DisableOpenSafeSafari(self.config,
self.environ,
self.logdispatch,
self.statechglogger)
self.rulename = self.rule.rulename
self.rulenumber = self.rule.rulenumber
self.ch = CommandHelper(self.logdispatch)
self.dc = "/usr/bin/defaults"
self.path = "com.apple.Safari"
self.key = "AutoOpenSafeDownloads"
def tearDown(self):
pass
def runTest(self):
self.simpleRuleTest()
def setConditionsForRule(self):
        '''This makes sure the initial report fails by executing the following
commands:
defaults write com.apple.Safari AutoOpenSafeDownloads -bool yes
:param self: essential if you override this definition
:returns: boolean - If successful True; If failure False
@author: dwalker
'''
success = False
cmd = [self.dc, "write", self.path, self.key, "-bool", "yes"]
self.logdispatch.log(LogPriority.DEBUG, str(cmd))
if self.ch.executeCommand(cmd):
success = self.checkReportForRule(False, True)
return success
def checkReportForRule(self, pCompliance, pRuleSuccess):
        '''To see what happened, run these commands:
defaults read com.apple.Safari AutoOpenSafeDownloads
:param self: essential if you override this definition
:param pCompliance:
:param pRuleSuccess:
:returns: boolean - If successful True; If failure False
@author: ekkehard j. koch
'''
success = True
self.logdispatch.log(LogPriority.DEBUG, "pCompliance = " + \
str(pCompliance) + ".")
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
cmd = [self.dc, "read", self.path, self.key]
self.logdispatch.log(LogPriority.DEBUG, str(cmd))
if self.ch.executeCommand(cmd):
output = self.ch.getOutputString()
return success
def checkFixForRule(self, pRuleSuccess):
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = self.checkReportForRule(True, pRuleSuccess)
return success
def checkUndoForRule(self, pRuleSuccess):
self.logdispatch.log(LogPriority.DEBUG, "pRuleSuccess = " + \
str(pRuleSuccess) + ".")
success = self.checkReportForRule(False, pRuleSuccess)
return success
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gpl-2.0 |
tchx84/debian-pkg-sugar | extensions/cpsection/keyboard/model.py | 11 | 6025 | # Copyright (C) 2013 Sugar Labs
# Copyright (C) 2009 OLPC
# Author: Sayamindu Dasgupta <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from gi.repository import Xkl
from gi.repository import Gio
_GROUP_NAME = 'grp' # The XKB name for group switch options
_KEYBOARD_DIR = 'org.sugarlabs.peripherals.keyboard'
_LAYOUTS_KEY = 'layouts'
_OPTIONS_KEY = 'options'
_MODEL_KEY = 'model'
class KeyboardManager(object):
def __init__(self, display):
self._engine = Xkl.Engine.get_instance(display)
self._configregistry = Xkl.ConfigRegistry.get_instance(self._engine)
self._configregistry.load(False)
self._configrec = Xkl.ConfigRec()
self._configrec.get_from_server(self._engine)
self._settings = Gio.Settings(_KEYBOARD_DIR)
def _populate_one(self, config_registry, item, store):
store.append([item.get_description(), item.get_name()])
def _populate_two(self, config_registry, item, subitem, store):
layout = item.get_name()
if subitem:
description = '%s, %s' % (subitem.get_description(),
item.get_description())
variant = subitem.get_name()
else:
description = 'Default layout, %s' % item.get_description()
variant = ''
store.append([description, ('%s(%s)' % (layout, variant))])
def get_models(self):
"""Return list of supported keyboard models"""
models = []
self._configregistry.foreach_model(self._populate_one, models)
models.sort()
return models
def get_languages(self):
"""Return list of supported keyboard languages"""
languages = []
self._configregistry.foreach_language(self._populate_one, languages)
languages.sort()
return languages
def get_layouts_for_language(self, language):
"""Return list of supported keyboard layouts for a given language"""
layouts = []
self._configregistry.foreach_language_variant(language,
self._populate_two,
layouts)
layouts.sort()
return layouts
def get_options_group(self):
"""Return list of supported options for switching keyboard group"""
options = []
self._configregistry.foreach_option(_GROUP_NAME, self._populate_one,
options)
options.sort()
return options
def get_current_model(self):
"""Return the enabled keyboard model"""
model = self._settings.get_string(_MODEL_KEY)
if not model:
model = self._configrec.model
self.set_model(model)
return model
def get_current_layouts(self):
"""Return the enabled keyboard layouts with variants"""
layouts = self._settings.get_strv(_LAYOUTS_KEY)
if layouts:
return layouts
layouts = self._configrec.layouts
variants = self._configrec.variants
layout_list = []
i = 0
for layout in layouts:
if len(variants) <= i or variants[i] == '':
layout_list.append('%s(%s)' % (layout, ''))
else:
layout_list.append('%s(%s)' % (layout, variants[i]))
i += 1
self.set_layouts(layout_list)
return layout_list
def get_current_option_group(self):
"""Return the enabled option for switching keyboard group"""
options = self._settings.get_strv(_OPTIONS_KEY)
if not options:
options = self._configrec.options
self.set_option_group(options)
for option in options:
if option.startswith(_GROUP_NAME):
return option
return None
def get_max_layouts(self):
"""Return the maximum number of layouts supported simultaneously"""
return self._engine.get_max_num_groups()
def set_model(self, model):
"""Sets the supplied keyboard model"""
        if not model:
return
self._settings.set_string(_MODEL_KEY, model)
self._configrec.set_model(model)
self._configrec.activate(self._engine)
def set_option_group(self, option_group):
"""Sets the supplied option for switching keyboard group"""
# XXX: Merge, not overwrite previous options
if not option_group:
options = ['']
elif isinstance(option_group, list):
options = option_group
else:
options = [option_group]
self._settings.set_strv(_OPTIONS_KEY, options)
self._configrec.set_options(options)
self._configrec.activate(self._engine)
def set_layouts(self, layouts):
"""Sets the supplied keyboard layouts (with variants)"""
        if not layouts:
return
self._settings.set_strv(_LAYOUTS_KEY, layouts)
layouts_list = []
variants_list = []
for layout in layouts:
layouts_list.append(layout.split('(')[0])
variants_list.append(layout.split('(')[1][:-1])
self._configrec.set_layouts(layouts_list)
self._configrec.set_variants(variants_list)
self._configrec.activate(self._engine)
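# Minimal usage sketch. Obtaining the X display handle is an assumption here
# (the hosting control panel passes one in); layout strings follow the
# 'layout(variant)' convention produced by get_current_layouts() above:
#
#   manager = KeyboardManager(display)  # `display`: an Xkl-compatible handle
#   manager.set_model('pc105')          # 'pc105' is an illustrative XKB model
#   manager.set_layouts(['us(basic)', 'de(nodeadkeys)'])
#   print(manager.get_current_layouts())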
| gpl-2.0 |
yanchen036/tensorflow | tensorflow/contrib/learn/python/learn/learn_runner_test.py | 76 | 16067 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""learn_main tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from tensorflow.contrib.learn.python.learn import evaluable # pylint: disable=g-import-not-at-top
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import run_config as run_config_lib
from tensorflow.contrib.training.python.training import hparam as hparam_lib
from tensorflow.python.estimator import run_config as core_run_config_lib
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
patch = test.mock.patch
_MODIR_DIR = "/tmp"
_HPARAMS = hparam_lib.HParams(learning_rate=0.01)
_MUST_SPECIFY_OUTPUT_DIR_MSG = "Must specify an output directory"
_MISSING_MODEL_DIR_ERR_MSG = (
"Must specify a model directory `model_dir` in `run_config`.")
_EXP_NOT_CALLABLE_MSG = "Experiment builder .* is not callable"
_INVALID_HPARAMS_ERR_MSG = "`hparams` must be `HParams` instance"
_NOT_EXP_TYPE_MSG = "Experiment builder did not return an Experiment"
_NON_EXIST_TASK_MSG = "Schedule references non-existent task"
_NON_CALLABLE_MSG = "Schedule references non-callable member"
_MUST_SPECIFY_OUTPUT_DIR_OR_CONFIG_MSG = (
"Must set value for `output_dir` or `run_config`")
_HPARAMS_CANNOT_BE_SET_FOR_OUTPUT_DIR_MSG = (
"Must set `hparams` as None for `experiment_fn` with `output_dir`.")
_CANNOT_SET_BOTH_OUTPUT_DIR_AND_CONFIG_MSG = (
"Cannot provide both `output_dir` and `run_config`")
_INVALID_RUN_CONFIG_TYPE_MSG = (
"`run_config` must be `tf.contrib.learn.RunConfig` instance")
_RUN_CONFIG_UID_CHECK_ERR_MSG = (
"`RunConfig` instance is expected to be used by the `Estimator`")
_MISSING_RUN_CONFIG_UID_ERR_MSG = (
"Pass `run_config` argument of the `experiment_fn` to the Estimator")
class TestExperiment(experiment.Experiment):
def __init__(self, default=None, config=None, model_dir=None):
self.default = default
self.config = config
internal_model_dir = model_dir or config.model_dir
self._model_dir = internal_model_dir
class Estimator(evaluable.Evaluable, trainable.Trainable):
config = self.config
@property
def model_dir(self):
return internal_model_dir
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
raise NotImplementedError
def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
batch_size=None, steps=None, metrics=None, name=None,
checkpoint_path=None, hooks=None):
raise NotImplementedError
super(TestExperiment, self).__init__(Estimator(), None, None)
def local_run(self):
return "local_run-{}".format(self._model_dir)
def train(self):
return "train-{}".format(self._model_dir)
def run_std_server(self):
return "run_std_server-{}".format(self._model_dir)
def train_and_evaluate(self):
return "train_and_evaluate-{}".format(self._model_dir)
def simple_task(self):
return "simple_task, default=%s." % self.default
# pylint: disable=unused-argument
def build_experiment(output_dir):
tf_logging.info("In default build_experiment.")
return TestExperiment(model_dir=output_dir)
def build_experiment_fn_for_output_dir(run_config=None):
def _build_experiment(output_dir):
tf_logging.info("In default build_experiment.")
return TestExperiment(config=run_config, model_dir=output_dir)
return _build_experiment
def build_experiment_for_run_config(run_config, hparams):
if hparams is not None and hparams != _HPARAMS:
raise ValueError("hparams is not set correctly")
return TestExperiment(config=run_config)
def build_non_experiment(output_dir):
return "Ceci n'est pas un Experiment."
# pylint: enable=unused-argument
def build_distributed_cluster_spec():
return {
run_config_lib.TaskType.PS: ["localhost:1234", "localhost:1235"],
run_config_lib.TaskType.WORKER: ["localhost:1236", "localhost:1237"],
run_config_lib.TaskType.MASTER: ["localhost:1238"],
"foo_has_no_default_schedule": ["localhost:1239"]
}
def build_non_distributed_cluster_spec():
return {"foo": ["localhost:1234"]}
class LearnRunnerRunWithOutputDirTest(test.TestCase):
def setUp(self):
# Ensure the TF_CONFIG environment variable is unset for all tests.
os.environ.pop("TF_CONFIG", None)
def test_run_with_custom_schedule(self):
self.assertEqual(
"simple_task, default=None.",
learn_runner.run(build_experiment,
output_dir=_MODIR_DIR,
schedule="simple_task"))
def test_run_with_explicit_local_run(self):
self.assertEqual(
"local_run-" + _MODIR_DIR,
learn_runner.run(build_experiment,
output_dir=_MODIR_DIR,
schedule="local_run"))
def test_fail_output_dir_and_run_config_are_both_set(self):
with self.assertRaisesRegexp(
ValueError, _CANNOT_SET_BOTH_OUTPUT_DIR_AND_CONFIG_MSG):
learn_runner.run(build_experiment,
output_dir=_MODIR_DIR,
schedule="simple_task",
run_config=run_config_lib.RunConfig())
def test_fail_empty_output_dir(self):
with self.assertRaisesRegexp(ValueError, _MUST_SPECIFY_OUTPUT_DIR_MSG):
learn_runner.run(build_experiment, output_dir="", schedule="simple_task")
def test_fail_no_output_dir(self):
with self.assertRaisesRegexp(
ValueError, _MUST_SPECIFY_OUTPUT_DIR_OR_CONFIG_MSG):
learn_runner.run(build_experiment, None, "simple_task")
def test_fail_hparams_are_set(self):
hparams = _HPARAMS
with self.assertRaisesRegexp(
ValueError, _HPARAMS_CANNOT_BE_SET_FOR_OUTPUT_DIR_MSG):
learn_runner.run(
build_experiment, _MODIR_DIR, schedule="simple_task", hparams=hparams)
def test_fail_non_callable(self):
with self.assertRaisesRegexp(TypeError, _EXP_NOT_CALLABLE_MSG):
learn_runner.run("not callable", _MODIR_DIR, "simple_test")
def test_fail_not_experiment(self):
with self.assertRaisesRegexp(TypeError, _NOT_EXP_TYPE_MSG):
learn_runner.run(build_non_experiment, _MODIR_DIR, "simple_test")
def test_fail_non_existent_task(self):
with self.assertRaisesRegexp(ValueError, _NON_EXIST_TASK_MSG):
learn_runner.run(build_experiment, _MODIR_DIR, "mirage")
def test_fail_non_callable_task(self):
with self.assertRaisesRegexp(TypeError, _NON_CALLABLE_MSG):
learn_runner.run(build_experiment, _MODIR_DIR, "default")
class LearnRunnerRunWithRunConfigTest(test.TestCase):
def setUp(self):
# Ensure the TF_CONFIG environment variable is unset for all tests.
os.environ.pop("TF_CONFIG", None)
def test_run_with_custom_schedule(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
self.assertEqual(
"simple_task, default=None.",
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="simple_task"))
def test_run_with_hparams(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
self.assertEqual(
"simple_task, default=None.",
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="simple_task",
hparams=_HPARAMS))
def test_run_with_explicit_local_run(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
self.assertEqual(
"local_run-" + _MODIR_DIR,
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="local_run"))
def test_fail_empty_output_dir(self):
run_config = run_config_lib.RunConfig(model_dir="")
with self.assertRaisesRegexp(ValueError, _MISSING_MODEL_DIR_ERR_MSG):
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="local_run")
def test_fail_no_output_dir(self):
run_config = run_config_lib.RunConfig()
with self.assertRaisesRegexp(ValueError, _MISSING_MODEL_DIR_ERR_MSG):
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="local_run")
def test_fail_invalid_run_config_type(self):
run_config = "invalid_run_config"
with self.assertRaisesRegexp(ValueError, _INVALID_RUN_CONFIG_TYPE_MSG):
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="local_run")
def test_fail_invalid_hparams_type(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
with self.assertRaisesRegexp(ValueError, _INVALID_HPARAMS_ERR_MSG):
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="local_run",
hparams=["hparams"])
def test_fail_non_callable(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
with self.assertRaisesRegexp(TypeError, _EXP_NOT_CALLABLE_MSG):
learn_runner.run("not callable",
run_config=run_config,
schedule="simple_task")
def test_fail_not_experiment(self):
def _experiment_fn(run_config, hparams):
del run_config, hparams # unused.
return "not experiment"
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
with self.assertRaisesRegexp(TypeError, _NOT_EXP_TYPE_MSG):
learn_runner.run(_experiment_fn,
run_config=run_config,
schedule="simple_task")
def test_fail_non_existent_task(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
with self.assertRaisesRegexp(ValueError, _NON_EXIST_TASK_MSG):
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="mirage")
def test_fail_non_callable_task(self):
run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
with self.assertRaisesRegexp(TypeError, _NON_CALLABLE_MSG):
learn_runner.run(build_experiment_for_run_config,
run_config=run_config,
schedule="default")
def test_basic_run_config_uid_check(self):
expected_run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
def _experiment_fn(run_config, hparams):
del run_config, hparams # unused.
# Explicitly use a new run_config.
new_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR + "/123")
return TestExperiment(config=new_config)
with self.assertRaisesRegexp(RuntimeError, _RUN_CONFIG_UID_CHECK_ERR_MSG):
learn_runner.run(experiment_fn=_experiment_fn,
run_config=expected_run_config)
def test_fail_invalid_experiment_config_type(self):
expected_run_config = run_config_lib.RunConfig(model_dir=_MODIR_DIR)
def _experiment_fn(run_config, hparams):
del run_config, hparams # unused.
# Explicitly use a new run_config without `uid` method.
new_config = core_run_config_lib.RunConfig(
model_dir=_MODIR_DIR + "/123")
return TestExperiment(config=new_config)
with self.assertRaisesRegexp(RuntimeError,
_MISSING_RUN_CONFIG_UID_ERR_MSG):
learn_runner.run(experiment_fn=_experiment_fn,
run_config=expected_run_config)
class LearnRunnerDefaultScheduleTest(test.TestCase):
def setUp(self):
# Ensure the TF_CONFIG environment variable is unset for all tests.
os.environ.pop("TF_CONFIG", None)
def test_schedule_from_tf_config_runs_train_on_worker(self):
os.environ["TF_CONFIG"] = json.dumps({
"cluster": build_distributed_cluster_spec(),
"task": {
"type": run_config_lib.TaskType.WORKER
}
})
# RunConfig constructor will set job_name from TF_CONFIG.
config = run_config_lib.RunConfig()
self.assertEqual(
"train-" + _MODIR_DIR,
learn_runner.run(
build_experiment_fn_for_output_dir(config),
output_dir=_MODIR_DIR))
def test_schedule_from_tf_config_runs_train_and_evaluate_on_master(self):
tf_config = {
"cluster": build_distributed_cluster_spec(),
"task": {
"type": run_config_lib.TaskType.MASTER
}
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertEqual(
"train_and_evaluate-" + _MODIR_DIR,
learn_runner.run(
build_experiment_fn_for_output_dir(config),
output_dir=_MODIR_DIR))
def test_schedule_from_tf_config_runs_serve_on_ps(self):
tf_config = {
"cluster": build_distributed_cluster_spec(),
"task": {
"type": run_config_lib.TaskType.PS
}
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertEqual(
"run_std_server-" + _MODIR_DIR,
learn_runner.run(
build_experiment_fn_for_output_dir(config),
output_dir=_MODIR_DIR))
def test_no_schedule_and_no_config_runs_train_and_evaluate(self):
self.assertEqual(
"train_and_evaluate-" + _MODIR_DIR,
learn_runner.run(build_experiment, output_dir=_MODIR_DIR))
def test_no_schedule_and_non_distributed_runs_train_and_evaluate(self):
tf_config = {"cluster": build_non_distributed_cluster_spec()}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertEqual(
"train_and_evaluate-" + _MODIR_DIR,
learn_runner.run(
build_experiment_fn_for_output_dir(config),
output_dir=_MODIR_DIR))
def test_fail_task_type_with_no_default_schedule(self):
tf_config = {
"cluster": build_distributed_cluster_spec(),
"task": {
"type": "foo_has_no_default_schedule"
}
}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
create_experiment_fn = lambda output_dir: TestExperiment(config=config)
self.assertRaisesRegexp(ValueError,
"No default schedule",
learn_runner.run,
create_experiment_fn,
_MODIR_DIR)
def test_fail_schedule_from_config_with_no_task_type(self):
tf_config = {"cluster": build_distributed_cluster_spec()}
with patch.dict("os.environ", {"TF_CONFIG": json.dumps(tf_config)}):
config = run_config_lib.RunConfig()
self.assertRaisesRegexp(
ValueError,
"Must specify a schedule",
learn_runner.run,
lambda output_dir: TestExperiment(config=config),
output_dir=_MODIR_DIR)
if __name__ == "__main__":
test.main()
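# For reference, the call shape these tests exercise (a sketch; the model
# directory is illustrative):
#
#   config = run_config_lib.RunConfig(model_dir="/tmp/model")
#   learn_runner.run(build_experiment_for_run_config,
#                    run_config=config,
#                    schedule="local_run",
#                    hparams=_HPARAMS)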
| apache-2.0 |
HomeRad/TorCleaner | wc/filter/rules/FolderRule.py | 1 | 3945 | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2000-2009 Bastian Kleineidam
"""
Group filter rules into folders.
"""
from ... import fileutil, configuration
from . import Rule
def recalc_up_down(rules):
"""
    Add .up and .down attributes to rules, used to display up/down
    arrows in GUIs.
"""
upper = len(rules)-1
for i, rule in enumerate(rules):
rule.up = (i>0)
rule.down = (i<upper)
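# Example (a minimal sketch; any objects that accept attribute assignment
# will do, since recalc_up_down only sets .up/.down on each element):
#
#   class _R(object):
#       pass
#   rules = [_R(), _R(), _R()]
#   recalc_up_down(rules)
#   # rules[0]: up=False, down=True
#   # rules[1]: up=True,  down=True
#   # rules[2]: up=True,  down=False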
class FolderRule(Rule.Rule):
"""
Container for a list of rules.
"""
def __init__(self, sid=None, titles=None, descriptions=None,
disable=0, filename=""):
"""
Initialize rule data.
"""
super(FolderRule, self).__init__(sid=sid, titles=titles,
descriptions=descriptions, disable=disable)
# make filename read-only
self._filename = filename
self.rules = []
self.attrnames.extend(('oid', 'configversion'))
self.intattrs.append('oid')
self.oid = None
self.configversion = "-"
def __str__(self):
"""
Return rule data as string.
"""
return super(FolderRule, self).__str__() + \
("\nrules: %d" % len(self.rules))
def filename_get(self):
"""
Get filename where this folder is stored.
"""
return self._filename
filename = property(filename_get)
def append_rule(self, r):
"""
Append rule to folder.
"""
r.oid = len(self.rules)
# note: the rules are added in order
self.rules.append(r)
r.parent = self
def delete_rule(self, i):
"""
Delete rule from folder with index i.
"""
del self.rules[i]
recalc_up_down(self.rules)
def update(self, rule, dryrun=False, log=None):
"""
Update this folder with given folder rule data.
"""
chg = super(FolderRule, self).update(rule, dryrun=dryrun, log=log)
for child in rule.rules:
if child.sid is None or not child.sid.startswith("wc"):
# ignore local rules
continue
oldrule = self.get_rule(child.sid)
if oldrule is not None:
if oldrule.update(child, dryrun=dryrun, log=log):
chg = True
else:
print >> log, _("inserting new rule %s") % \
child.tiptext()
if not dryrun:
self.rules.append(child)
chg = True
if chg:
recalc_up_down(self.rules)
return chg
def get_rule(self, sid):
"""
Return rule with given sid or None if not found.
"""
for rule in self.rules:
if rule.sid == sid:
return rule
return None
def toxml(self):
"""
Rule data as XML for storing.
"""
s = u"""<?xml version="1.0" encoding="%s"?>
<!DOCTYPE folder SYSTEM "filter.dtd">
%s oid="%d" configversion="%s">""" % \
(configuration.ConfigCharset, super(FolderRule, self).toxml(),
self.oid, self.configversion)
s += u"\n"+self.title_desc_toxml()+u"\n"
for r in self.rules:
s += u"\n%s\n" % r.toxml()
return s+u"</folder>\n"
def write(self, fd=None):
"""
Write xml data into filename.
@raise: OSError if file could not be written.
"""
s = self.toxml().encode("iso-8859-1", "replace")
if fd is None:
fileutil.write_file(self.filename, s)
else:
fd.write(s)
def tiptext(self):
"""
Return short info for gui display.
"""
l = len(self.rules)
if l == 1:
text = _("with 1 rule")
else:
text = _("with %d rules") % l
return "%s %s" % (super(FolderRule, self).tiptext(), text)
| gpl-2.0 |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/json/tool.py | 11 | 1463 | r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m json.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m json.tool
Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
"""
import argparse
import json
import sys
def main():
prog = 'python -m json.tool'
description = ('A simple command line interface for json module '
'to validate and pretty-print JSON objects.')
parser = argparse.ArgumentParser(prog=prog, description=description)
parser.add_argument('infile', nargs='?', type=argparse.FileType(),
help='a JSON file to be validated or pretty-printed')
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
help='write the output of infile to outfile')
parser.add_argument('--sort-keys', action='store_true', default=False,
help='sort the output of dictionaries alphabetically by key')
options = parser.parse_args()
infile = options.infile or sys.stdin
outfile = options.outfile or sys.stdout
sort_keys = options.sort_keys
with infile:
try:
obj = json.load(infile)
except ValueError as e:
raise SystemExit(e)
with outfile:
json.dump(obj, outfile, sort_keys=sort_keys, indent=4)
outfile.write('\n')
if __name__ == '__main__':
main()
| apache-2.0 |
finch0219/linux | scripts/gdb/linux/utils.py | 630 | 4267 | #
# gdb helper commands and functions for Linux kernel debugging
#
# common utilities
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <[email protected]>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
class CachedType:
def __init__(self, name):
self._type = None
self._name = name
def _new_objfile_handler(self, event):
self._type = None
gdb.events.new_objfile.disconnect(self._new_objfile_handler)
def get_type(self):
if self._type is None:
self._type = gdb.lookup_type(self._name)
if self._type is None:
raise gdb.GdbError(
"cannot resolve type '{0}'".format(self._name))
if hasattr(gdb, 'events') and hasattr(gdb.events, 'new_objfile'):
gdb.events.new_objfile.connect(self._new_objfile_handler)
return self._type
long_type = CachedType("long")
def get_long_type():
global long_type
return long_type.get_type()
def offset_of(typeobj, field):
element = gdb.Value(0).cast(typeobj)
return int(str(element[field].address).split()[0], 16)
def container_of(ptr, typeobj, member):
return (ptr.cast(get_long_type()) -
offset_of(typeobj, member)).cast(typeobj)
class ContainerOf(gdb.Function):
"""Return pointer to containing data structure.
$container_of(PTR, "TYPE", "ELEMENT"): Given PTR, return a pointer to the
data structure of the type TYPE in which PTR is the address of ELEMENT.
Note that TYPE and ELEMENT have to be quoted as strings."""
def __init__(self):
super(ContainerOf, self).__init__("container_of")
def invoke(self, ptr, typename, elementname):
return container_of(ptr, gdb.lookup_type(typename.string()).pointer(),
elementname.string())
ContainerOf()
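# Example gdb session (a sketch; `init_task` is a symbol present in Linux
# kernel images, the intended target of this helper):
#
#   (gdb) print $container_of(&init_task.tasks, "struct task_struct", "tasks")
#
# This performs the same pointer arithmetic as the kernel's container_of()
# macro: subtract offset_of(type, member) from the pointer, then cast.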
BIG_ENDIAN = 0
LITTLE_ENDIAN = 1
target_endianness = None
def get_target_endianness():
global target_endianness
if target_endianness is None:
endian = gdb.execute("show endian", to_string=True)
if "little endian" in endian:
target_endianness = LITTLE_ENDIAN
elif "big endian" in endian:
target_endianness = BIG_ENDIAN
else:
raise gdb.GdbError("unknown endianness '{0}'".format(str(endian)))
return target_endianness
def read_u16(buffer):
if get_target_endianness() == LITTLE_ENDIAN:
return ord(buffer[0]) + (ord(buffer[1]) << 8)
else:
return ord(buffer[1]) + (ord(buffer[0]) << 8)
def read_u32(buffer):
if get_target_endianness() == LITTLE_ENDIAN:
return read_u16(buffer[0:2]) + (read_u16(buffer[2:4]) << 16)
else:
return read_u16(buffer[2:4]) + (read_u16(buffer[0:2]) << 16)
def read_u64(buffer):
if get_target_endianness() == LITTLE_ENDIAN:
return read_u32(buffer[0:4]) + (read_u32(buffer[4:8]) << 32)
else:
return read_u32(buffer[4:8]) + (read_u32(buffer[0:4]) << 32)
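# Example: for the two bytes '\x34\x12', a little-endian target gives
# read_u16(...) == 0x1234, while a big-endian target gives 0x3412.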
target_arch = None
def is_target_arch(arch):
if hasattr(gdb.Frame, 'architecture'):
return arch in gdb.newest_frame().architecture().name()
else:
global target_arch
if target_arch is None:
target_arch = gdb.execute("show architecture", to_string=True)
return arch in target_arch
GDBSERVER_QEMU = 0
GDBSERVER_KGDB = 1
gdbserver_type = None
def get_gdbserver_type():
def exit_handler(event):
global gdbserver_type
gdbserver_type = None
gdb.events.exited.disconnect(exit_handler)
def probe_qemu():
try:
return gdb.execute("monitor info version", to_string=True) != ""
except:
return False
def probe_kgdb():
try:
thread_info = gdb.execute("info thread 2", to_string=True)
return "shadowCPU0" in thread_info
except:
return False
global gdbserver_type
if gdbserver_type is None:
if probe_qemu():
gdbserver_type = GDBSERVER_QEMU
elif probe_kgdb():
gdbserver_type = GDBSERVER_KGDB
if gdbserver_type is not None and hasattr(gdb, 'events'):
gdb.events.exited.connect(exit_handler)
return gdbserver_type
| gpl-2.0 |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/pygments/lexers/dsls.py | 72 | 18768 | # -*- coding: utf-8 -*-
"""
pygments.lexers.dsls
~~~~~~~~~~~~~~~~~~~~
Lexers for various domain-specific languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, words, include, default
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Literal
__all__ = ['ProtoBufLexer', 'BroLexer', 'PuppetLexer', 'RslLexer',
'MscgenLexer', 'VGLLexer', 'AlloyLexer', 'PanLexer']
class ProtoBufLexer(RegexLexer):
"""
Lexer for `Protocol Buffer <http://code.google.com/p/protobuf/>`_
definition files.
.. versionadded:: 1.4
"""
name = 'Protocol Buffer'
aliases = ['protobuf', 'proto']
filenames = ['*.proto']
tokens = {
'root': [
(r'[ \t]+', Text),
(r'[,;{}\[\]()]', Punctuation),
(r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?\*(.|\n)*?\*(\\\n)?/', Comment.Multiline),
(words((
'import', 'option', 'optional', 'required', 'repeated', 'default',
'packed', 'ctype', 'extensions', 'to', 'max', 'rpc', 'returns',
'oneof'), prefix=r'\b', suffix=r'\b'),
Keyword),
(words((
'int32', 'int64', 'uint32', 'uint64', 'sint32', 'sint64',
'fixed32', 'fixed64', 'sfixed32', 'sfixed64',
'float', 'double', 'bool', 'string', 'bytes'), suffix=r'\b'),
Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'(package)(\s+)', bygroups(Keyword.Namespace, Text), 'package'),
(r'(message|extend)(\s+)',
bygroups(Keyword.Declaration, Text), 'message'),
(r'(enum|group|service)(\s+)',
bygroups(Keyword.Declaration, Text), 'type'),
(r'\".*?\"', String),
(r'\'.*?\'', String),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[LlUu]*', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'(\-?(inf|nan))\b', Number.Float),
(r'0x[0-9a-fA-F]+[LlUu]*', Number.Hex),
(r'0[0-7]+[LlUu]*', Number.Oct),
(r'\d+[LlUu]*', Number.Integer),
(r'[+-=]', Operator),
(r'([a-zA-Z_][\w.]*)([ \t]*)(=)',
bygroups(Name.Attribute, Text, Operator)),
('[a-zA-Z_][\w.]*', Name),
],
'package': [
(r'[a-zA-Z_]\w*', Name.Namespace, '#pop'),
default('#pop'),
],
'message': [
(r'[a-zA-Z_]\w*', Name.Class, '#pop'),
default('#pop'),
],
'type': [
(r'[a-zA-Z_]\w*', Name, '#pop'),
default('#pop'),
],
}
class BroLexer(RegexLexer):
"""
For `Bro <http://bro-ids.org/>`_ scripts.
.. versionadded:: 1.5
"""
name = 'Bro'
aliases = ['bro']
filenames = ['*.bro']
_hex = r'[0-9a-fA-F_]'
_float = r'((\d*\.?\d+)|(\d+\.?\d*))([eE][-+]?\d+)?'
_h = r'[A-Za-z0-9][-A-Za-z0-9]*'
tokens = {
'root': [
# Whitespace
(r'^@.*?\n', Comment.Preproc),
(r'#.*?\n', Comment.Single),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text),
# Keywords
(r'(add|alarm|break|case|const|continue|delete|do|else|enum|event'
r'|export|for|function|if|global|hook|local|module|next'
r'|of|print|redef|return|schedule|switch|type|when|while)\b', Keyword),
(r'(addr|any|bool|count|counter|double|file|int|interval|net'
r'|pattern|port|record|set|string|subnet|table|time|timer'
r'|vector)\b', Keyword.Type),
(r'(T|F)\b', Keyword.Constant),
(r'(&)((?:add|delete|expire)_func|attr|(?:create|read|write)_expire'
r'|default|disable_print_hook|raw_output|encrypt|group|log'
r'|mergeable|optional|persistent|priority|redef'
r'|rotate_(?:interval|size)|synchronized)\b',
bygroups(Punctuation, Keyword)),
(r'\s+module\b', Keyword.Namespace),
# Addresses, ports and networks
(r'\d+/(tcp|udp|icmp|unknown)\b', Number),
(r'(\d+\.){3}\d+', Number),
(r'(' + _hex + r'){7}' + _hex, Number),
(r'0x' + _hex + r'(' + _hex + r'|:)*::(' + _hex + r'|:)*', Number),
(r'((\d+|:)(' + _hex + r'|:)*)?::(' + _hex + r'|:)*', Number),
(r'(\d+\.\d+\.|(\d+\.){2}\d+)', Number),
# Hostnames
(_h + r'(\.' + _h + r')+', String),
# Numeric
(_float + r'\s+(day|hr|min|sec|msec|usec)s?\b', Literal.Date),
(r'0[xX]' + _hex, Number.Hex),
(_float, Number.Float),
(r'\d+', Number.Integer),
(r'/', String.Regex, 'regex'),
(r'"', String, 'string'),
# Operators
(r'[!%*/+:<=>?~|-]', Operator),
(r'([-+=&|]{2}|[+=!><-]=)', Operator),
(r'(in|match)\b', Operator.Word),
(r'[{}()\[\]$.,;]', Punctuation),
# Identfier
(r'([_a-zA-Z]\w*)(::)', bygroups(Name, Name.Namespace)),
(r'[a-zA-Z_]\w*', Name)
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String),
(r'\\\n', String),
(r'\\', String)
],
'regex': [
(r'/', String.Regex, '#pop'),
(r'\\[\\nt/]', String.Regex), # String.Escape is too intense here.
(r'[^\\/\n]+', String.Regex),
(r'\\\n', String.Regex),
(r'\\', String.Regex)
]
}
class PuppetLexer(RegexLexer):
"""
For `Puppet <http://puppetlabs.com/>`__ configuration DSL.
.. versionadded:: 1.6
"""
name = 'Puppet'
aliases = ['puppet']
filenames = ['*.pp']
tokens = {
'root': [
include('comments'),
include('keywords'),
include('names'),
include('numbers'),
include('operators'),
include('strings'),
(r'[]{}:(),;[]', Punctuation),
(r'[^\S\n]+', Text),
],
'comments': [
(r'\s*#.*$', Comment),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
],
'operators': [
(r'(=>|\?|<|>|=|\+|-|/|\*|~|!|\|)', Operator),
(r'(in|and|or|not)\b', Operator.Word),
],
'names': [
('[a-zA-Z_]\w*', Name.Attribute),
(r'(\$\S+)(\[)(\S+)(\])', bygroups(Name.Variable, Punctuation,
String, Punctuation)),
(r'\$\S+', Name.Variable),
],
'numbers': [
# Copypasta from the Python lexer
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
(r'\d+[eE][+-]?[0-9]+j?', Number.Float),
(r'0[0-7]+j?', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+j?', Number.Integer)
],
'keywords': [
            # Left out 'group' and 'require' since they're often used as attributes
(words((
'absent', 'alert', 'alias', 'audit', 'augeas', 'before', 'case',
'check', 'class', 'computer', 'configured', 'contained',
'create_resources', 'crit', 'cron', 'debug', 'default',
'define', 'defined', 'directory', 'else', 'elsif', 'emerg',
'err', 'exec', 'extlookup', 'fail', 'false', 'file',
'filebucket', 'fqdn_rand', 'generate', 'host', 'if', 'import',
'include', 'info', 'inherits', 'inline_template', 'installed',
'interface', 'k5login', 'latest', 'link', 'loglevel',
'macauthorization', 'mailalias', 'maillist', 'mcx', 'md5',
'mount', 'mounted', 'nagios_command', 'nagios_contact',
'nagios_contactgroup', 'nagios_host', 'nagios_hostdependency',
'nagios_hostescalation', 'nagios_hostextinfo', 'nagios_hostgroup',
'nagios_service', 'nagios_servicedependency', 'nagios_serviceescalation',
'nagios_serviceextinfo', 'nagios_servicegroup', 'nagios_timeperiod',
'node', 'noop', 'notice', 'notify', 'package', 'present', 'purged',
'realize', 'regsubst', 'resources', 'role', 'router', 'running',
'schedule', 'scheduled_task', 'search', 'selboolean', 'selmodule',
'service', 'sha1', 'shellquote', 'split', 'sprintf',
'ssh_authorized_key', 'sshkey', 'stage', 'stopped', 'subscribe',
'tag', 'tagged', 'template', 'tidy', 'true', 'undef', 'unmounted',
'user', 'versioncmp', 'vlan', 'warning', 'yumrepo', 'zfs', 'zone',
'zpool'), prefix='(?i)', suffix=r'\b'),
Keyword),
],
'strings': [
(r'"([^"])*"', String),
(r"'(\\'|[^'])*'", String),
],
}
class RslLexer(RegexLexer):
"""
`RSL <http://en.wikipedia.org/wiki/RAISE>`_ is the formal specification
language used in RAISE (Rigorous Approach to Industrial Software Engineering)
method.
.. versionadded:: 2.0
"""
name = 'RSL'
aliases = ['rsl']
filenames = ['*.rsl']
mimetypes = ['text/rsl']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
(words((
'Bool', 'Char', 'Int', 'Nat', 'Real', 'Text', 'Unit', 'abs',
'all', 'always', 'any', 'as', 'axiom', 'card', 'case', 'channel',
'chaos', 'class', 'devt_relation', 'dom', 'elems', 'else', 'elif',
'end', 'exists', 'extend', 'false', 'for', 'hd', 'hide', 'if',
'in', 'is', 'inds', 'initialise', 'int', 'inter', 'isin', 'len',
'let', 'local', 'ltl_assertion', 'object', 'of', 'out', 'post',
'pre', 'read', 'real', 'rng', 'scheme', 'skip', 'stop', 'swap',
'then', 'theory', 'test_case', 'tl', 'transition_system', 'true',
'type', 'union', 'until', 'use', 'value', 'variable', 'while',
'with', 'write', '~isin', '-inflist', '-infset', '-list',
'-set'), prefix=r'\b', suffix=r'\b'),
Keyword),
(r'(variable|value)\b', Keyword.Declaration),
(r'--.*?\n', Comment),
(r'<:.*?:>', Comment),
(r'\{!.*?!\}', Comment),
(r'/\*.*?\*/', Comment),
(r'^[ \t]*([\w]+)[ \t]*:[^:]', Name.Function),
(r'(^[ \t]*)([\w]+)([ \t]*\([\w\s,]*\)[ \t]*)(is|as)',
bygroups(Text, Name.Function, Text, Keyword)),
(r'\b[A-Z]\w*\b', Keyword.Type),
(r'(true|false)\b', Keyword.Constant),
(r'".*"', String),
(r'\'.\'', String.Char),
(r'(><|->|-m->|/\\|<=|<<=|<\.|\|\||\|\^\||-~->|-~m->|\\/|>=|>>|'
r'\.>|\+\+|-\\|<->|=>|:-|~=|\*\*|<<|>>=|\+>|!!|\|=\||#)',
Operator),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'.', Text),
],
}
def analyse_text(text):
"""
Check for the most common text in the beginning of a RSL file.
"""
if re.search(r'scheme\s*.*?=\s*class\s*type', text, re.I) is not None:
return 1.0
class MscgenLexer(RegexLexer):
"""
For `Mscgen <http://www.mcternan.me.uk/mscgen/>`_ files.
.. versionadded:: 1.6
"""
name = 'Mscgen'
aliases = ['mscgen', 'msc']
filenames = ['*.msc']
_var = r'(\w+|"(?:\\"|[^"])*")'
tokens = {
'root': [
(r'msc\b', Keyword.Type),
# Options
(r'(hscale|HSCALE|width|WIDTH|wordwraparcs|WORDWRAPARCS'
r'|arcgradient|ARCGRADIENT)\b', Name.Property),
# Operators
(r'(abox|ABOX|rbox|RBOX|box|BOX|note|NOTE)\b', Operator.Word),
(r'(\.|-|\|){3}', Keyword),
(r'(?:-|=|\.|:){2}'
r'|<<=>>|<->|<=>|<<>>|<:>'
r'|->|=>>|>>|=>|:>|-x|-X'
r'|<-|<<=|<<|<=|<:|x-|X-|=', Operator),
# Names
(r'\*', Name.Builtin),
(_var, Name.Variable),
# Other
(r'\[', Punctuation, 'attrs'),
(r'\{|\}|,|;', Punctuation),
include('comments')
],
'attrs': [
(r'\]', Punctuation, '#pop'),
(_var + r'(\s*)(=)(\s*)' + _var,
bygroups(Name.Attribute, Text.Whitespace, Operator, Text.Whitespace,
String)),
(r',', Punctuation),
include('comments')
],
'comments': [
(r'(?://|#).*?\n', Comment.Single),
(r'/\*(?:.|\n)*?\*/', Comment.Multiline),
(r'[ \t\r\n]+', Text.Whitespace)
]
}
class VGLLexer(RegexLexer):
"""
For `SampleManager VGL <http://www.thermoscientific.com/samplemanager>`_
source code.
.. versionadded:: 1.6
"""
name = 'VGL'
aliases = ['vgl']
filenames = ['*.rpf']
flags = re.MULTILINE | re.DOTALL | re.IGNORECASE
tokens = {
'root': [
(r'\{[^}]*\}', Comment.Multiline),
(r'declare', Keyword.Constant),
(r'(if|then|else|endif|while|do|endwhile|and|or|prompt|object'
r'|create|on|line|with|global|routine|value|endroutine|constant'
r'|global|set|join|library|compile_option|file|exists|create|copy'
r'|delete|enable|windows|name|notprotected)(?! *[=<>.,()])',
Keyword),
(r'(true|false|null|empty|error|locked)', Keyword.Constant),
(r'[~^*#!%&\[\]()<>|+=:;,./?-]', Operator),
(r'"[^"]*"', String),
(r'(\.)([a-z_$][\w$]*)', bygroups(Operator, Name.Attribute)),
(r'[0-9][0-9]*(\.[0-9]+(e[+\-]?[0-9]+)?)?', Number),
(r'[a-z_$][\w$]*', Name),
(r'[\r\n]+', Text),
(r'\s+', Text)
]
}
class AlloyLexer(RegexLexer):
"""
For `Alloy <http://alloy.mit.edu>`_ source code.
.. versionadded:: 2.0
"""
name = 'Alloy'
aliases = ['alloy']
filenames = ['*.als']
mimetypes = ['text/x-alloy']
flags = re.MULTILINE | re.DOTALL
iden_rex = r'[a-zA-Z_][\w\']*'
text_tuple = (r'[^\S\n]+', Text)
tokens = {
'sig': [
(r'(extends)\b', Keyword, '#pop'),
(iden_rex, Name),
text_tuple,
(r',', Punctuation),
(r'\{', Operator, '#pop'),
],
'module': [
text_tuple,
(iden_rex, Name, '#pop'),
],
'fun': [
text_tuple,
(r'\{', Operator, '#pop'),
(iden_rex, Name, '#pop'),
],
'root': [
(r'--.*?$', Comment.Single),
(r'//.*?$', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
text_tuple,
(r'(module|open)(\s+)', bygroups(Keyword.Namespace, Text),
'module'),
(r'(sig|enum)(\s+)', bygroups(Keyword.Declaration, Text), 'sig'),
(r'(iden|univ|none)\b', Keyword.Constant),
(r'(int|Int)\b', Keyword.Type),
(r'(this|abstract|extends|set|seq|one|lone|let)\b', Keyword),
(r'(all|some|no|sum|disj|when|else)\b', Keyword),
(r'(run|check|for|but|exactly|expect|as)\b', Keyword),
(r'(and|or|implies|iff|in)\b', Operator.Word),
(r'(fun|pred|fact|assert)(\s+)', bygroups(Keyword, Text), 'fun'),
(r'!|#|&&|\+\+|<<|>>|>=|<=>|<=|\.|->', Operator),
(r'[-+/*%=<>&!^|~{}\[\]().]', Operator),
(iden_rex, Name),
(r'[:,]', Punctuation),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\n', Text),
]
}
class PanLexer(RegexLexer):
"""
Lexer for `pan <http://github.com/quattor/pan/>`_ source files.
Based on tcsh lexer.
.. versionadded:: 2.0
"""
name = 'Pan'
aliases = ['pan']
filenames = ['*.pan']
tokens = {
'root': [
include('basic'),
(r'\(', Keyword, 'paren'),
(r'\{', Keyword, 'curly'),
include('data'),
],
'basic': [
(words((
'if', 'for', 'with', 'else', 'type', 'bind', 'while', 'valid', 'final', 'prefix',
'unique', 'object', 'foreach', 'include', 'template', 'function', 'variable',
'structure', 'extensible', 'declaration'), prefix=r'\b', suffix=r'\s*\b'),
Keyword),
(words((
'file_contents', 'format', 'index', 'length', 'match', 'matches', 'replace',
'splice', 'split', 'substr', 'to_lowercase', 'to_uppercase', 'debug', 'error',
'traceback', 'deprecated', 'base64_decode', 'base64_encode', 'digest', 'escape',
'unescape', 'append', 'create', 'first', 'nlist', 'key', 'list', 'merge', 'next',
'prepend', 'is_boolean', 'is_defined', 'is_double', 'is_list', 'is_long',
'is_nlist', 'is_null', 'is_number', 'is_property', 'is_resource', 'is_string',
'to_boolean', 'to_double', 'to_long', 'to_string', 'clone', 'delete', 'exists',
'path_exists', 'if_exists', 'return', 'value'), prefix=r'\b', suffix=r'\s*\b'),
Name.Builtin),
(r'#.*', Comment),
(r'\\[\w\W]', String.Escape),
(r'(\b\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
(r'[\[\]{}()=]+', Operator),
(r'<<\s*(\'?)\\?(\w+)[\w\W]+?\2', String),
(r';', Punctuation),
],
'data': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
(r"(?s)'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single),
(r'\s+', Text),
(r'[^=\s\[\]{}()$"\'`\\;#]+', Text),
(r'\d+(?= |\Z)', Number),
],
'curly': [
(r'\}', Keyword, '#pop'),
(r':-', Keyword),
(r'\w+', Name.Variable),
(r'[^}:"\'`$]+', Punctuation),
(r':', Punctuation),
include('root'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('root'),
],
}
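# Usage sketch (standard pygments API; the formatter choice is illustrative):
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   code = 'message Person { required string name = 1; }'
#   print(highlight(code, ProtoBufLexer(), TerminalFormatter()))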
| bsd-3-clause |
skg-net/ansible | lib/ansible/modules/cloud/amazon/ec2_snapshot.py | 71 | 9675 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_snapshot
short_description: creates a snapshot from an existing volume
description:
- creates an EC2 snapshot from an existing EBS volume
version_added: "1.5"
options:
volume_id:
description:
- volume from which to take the snapshot
required: false
description:
description:
- description to be applied to the snapshot
required: false
instance_id:
description:
      - instance on which the volume to snapshot is mounted
required: false
device_name:
description:
- device name of a mounted volume to be snapshotted
required: false
snapshot_tags:
description:
- a hash/dictionary of tags to add to the snapshot
required: false
version_added: "1.6"
wait:
description:
- wait for the snapshot to be ready
type: bool
required: false
default: yes
version_added: "1.5.1"
wait_timeout:
description:
- how long before wait gives up, in seconds
- specify 0 to wait forever
required: false
default: 0
version_added: "1.5.1"
state:
description:
      - whether to create or delete the snapshot
required: false
default: present
choices: ['absent', 'present']
version_added: "1.9"
snapshot_id:
description:
- snapshot id to remove
required: false
version_added: "1.9"
last_snapshot_min_age:
description:
      - If the volume's most recent snapshot was started less than `last_snapshot_min_age` minutes ago, a new snapshot will not be created.
required: false
default: 0
version_added: "2.0"
author: "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- ec2_snapshot:
volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
snapshot_tags:
frequency: hourly
source: /data
# Remove a snapshot
- local_action:
module: ec2_snapshot
snapshot_id: snap-abcd1234
state: absent
# Create a snapshot only if the most recent one is older than 1 hour
- local_action:
module: ec2_snapshot
volume_id: vol-abcdef12
last_snapshot_min_age: 60
'''
import time
import datetime
try:
import boto.exception
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
# Find the most recent snapshot
def _get_snapshot_starttime(snap):
return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
"""
Gets the most recently created snapshot and optionally filters the result
if the snapshot is too old
:param snapshots: list of snapshots to search
:param max_snapshot_age_secs: filter the result if its older than this
:param now: simulate time -- used for unit testing
:return:
"""
if len(snapshots) == 0:
return None
if not now:
now = datetime.datetime.utcnow()
youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)
# See if the snapshot is younger that the given max age
snapshot_start = datetime.datetime.strptime(youngest_snapshot.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
snapshot_age = now - snapshot_start
if max_snapshot_age_secs is not None:
if snapshot_age.total_seconds() > max_snapshot_age_secs:
return None
return youngest_snapshot
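# Example (self-contained sketch; a stand-in object replaces the boto
# snapshot, which only needs a `start_time` string in this format):
#
#   class _Snap(object):
#       start_time = '2016-01-01T00:00:00.000Z'
#   now = datetime.datetime(2016, 1, 1, 0, 30)
#   _get_most_recent_snapshot([_Snap()], max_snapshot_age_secs=3600, now=now)
#   # -> the snapshot (30 minutes old, inside the one-hour window)
#   _get_most_recent_snapshot([_Snap()], max_snapshot_age_secs=60, now=now)
#   # -> None (older than the 60-second window)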
def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
"""
Wait for the snapshot to be created
:param snapshot:
:param wait_timeout_secs: fail this step after this many seconds
:param sleep_func:
:return:
"""
time_waited = 0
snapshot.update()
while snapshot.status != 'completed':
sleep_func(3)
snapshot.update()
time_waited += 3
if wait_timeout_secs and time_waited > wait_timeout_secs:
return False
return True
def create_snapshot(module, ec2, state=None, description=None, wait=None,
wait_timeout=None, volume_id=None, instance_id=None,
snapshot_id=None, device_name=None, snapshot_tags=None,
last_snapshot_min_age=None):
snapshot = None
changed = False
required = [volume_id, snapshot_id, instance_id]
if required.count(None) != len(required) - 1: # only 1 must be set
module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
if instance_id and not device_name or device_name and not instance_id:
module.fail_json(msg='Instance ID and device name must both be specified')
if instance_id:
try:
volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
if not volumes:
module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
volume_id = volumes[0].id
if state == 'absent':
if not snapshot_id:
module.fail_json(msg='snapshot_id must be set when state is absent')
try:
ec2.delete_snapshot(snapshot_id)
except boto.exception.BotoServerError as e:
# exception is raised if snapshot does not exist
if e.error_code == 'InvalidSnapshot.NotFound':
module.exit_json(changed=False)
else:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
# successful delete
module.exit_json(changed=True)
if last_snapshot_min_age > 0:
try:
current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds
snapshot = _get_most_recent_snapshot(current_snapshots,
max_snapshot_age_secs=last_snapshot_min_age)
try:
# Create a new snapshot if we didn't find an existing one to use
if snapshot is None:
snapshot = ec2.create_snapshot(volume_id, description=description)
changed = True
if wait:
if not _create_with_wait(snapshot, wait_timeout):
module.fail_json(msg='Timed out while creating snapshot.')
if snapshot_tags:
for k, v in snapshot_tags.items():
snapshot.add_tag(k, v)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
module.exit_json(changed=changed,
snapshot_id=snapshot.id,
volume_id=snapshot.volume_id,
volume_size=snapshot.volume_size,
tags=snapshot.tags.copy())
def create_snapshot_ansible_module():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
volume_id=dict(),
description=dict(),
instance_id=dict(),
snapshot_id=dict(),
device_name=dict(),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=0),
last_snapshot_min_age=dict(type='int', default=0),
snapshot_tags=dict(type='dict', default=dict()),
state=dict(choices=['absent', 'present'], default='present'),
)
)
module = AnsibleModule(argument_spec=argument_spec)
return module
def main():
module = create_snapshot_ansible_module()
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
volume_id = module.params.get('volume_id')
snapshot_id = module.params.get('snapshot_id')
description = module.params.get('description')
instance_id = module.params.get('instance_id')
device_name = module.params.get('device_name')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
last_snapshot_min_age = module.params.get('last_snapshot_min_age')
snapshot_tags = module.params.get('snapshot_tags')
state = module.params.get('state')
ec2 = ec2_connect(module)
create_snapshot(
module=module,
state=state,
description=description,
wait=wait,
wait_timeout=wait_timeout,
ec2=ec2,
volume_id=volume_id,
instance_id=instance_id,
snapshot_id=snapshot_id,
device_name=device_name,
snapshot_tags=snapshot_tags,
last_snapshot_min_age=last_snapshot_min_age
)
if __name__ == '__main__':
main()
| gpl-3.0 |
funkring/fdoo | addons/web_linkedin/web_linkedin.py | 333 | 4485 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import urllib2
from urlparse import urlparse, urlunparse
import openerp
import openerp.addons.web
from openerp.osv import fields, osv
class Binary(openerp.http.Controller):
@openerp.http.route('/web_linkedin/binary/url2binary', type='json', auth='user')
def url2binary(self, url):
"""Used exclusively to load images from LinkedIn profiles, must not be used for anything else."""
_scheme, _netloc, path, params, query, fragment = urlparse(url)
# media.linkedin.com is the master domain for LinkedIn media (replicated to CDNs),
# so forcing it should always work and prevents abusing this method to load arbitrary URLs
url = urlunparse(('http', 'media.licdn.com', path, params, query, fragment))
bfile = urllib2.urlopen(url)
return base64.b64encode(bfile.read())
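# Sketch of the effective behavior (the controller is normally reached via
# the web client's JSON-RPC layer; the URL below is illustrative):
#
#   Binary().url2binary('http://www.linkedin.com/mpr/some/profile.jpg')
#
# The host is always rewritten to media.licdn.com before fetching, so only
# LinkedIn media can be loaded through this endpoint.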
class web_linkedin_settings(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'api_key': fields.char(string="API Key", size=50),
'server_domain': fields.char(),
}
def get_default_linkedin(self, cr, uid, fields, context=None):
key = self.pool.get("ir.config_parameter").get_param(cr, uid, "web.linkedin.apikey") or ""
dom = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
return {'api_key': key, 'server_domain': dom,}
def set_linkedin(self, cr, uid, ids, context=None):
key = self.browse(cr, uid, ids[0], context)["api_key"] or ""
self.pool.get("ir.config_parameter").set_param(cr, uid, "web.linkedin.apikey", key, groups=['base.group_user'])
class web_linkedin_fields(osv.Model):
_inherit = 'res.partner'
def _get_url(self, cr, uid, ids, name, arg, context=None):
res = dict((id, False) for id in ids)
for partner in self.browse(cr, uid, ids, context=context):
res[partner.id] = partner.linkedin_url
return res
def linkedin_check_similar_partner(self, cr, uid, linkedin_datas, context=None):
res = []
res_partner = self.pool.get('res.partner')
for linkedin_data in linkedin_datas:
partner_ids = res_partner.search(cr, uid, ["|", ("linkedin_id", "=", linkedin_data['id']),
"&", ("linkedin_id", "=", False),
"|", ("name", "ilike", linkedin_data['firstName'] + "%" + linkedin_data['lastName']), ("name", "ilike", linkedin_data['lastName'] + "%" + linkedin_data['firstName'])], context=context)
if partner_ids:
partner = res_partner.read(cr, uid, partner_ids[0], ["image", "mobile", "phone", "parent_id", "name", "email", "function", "linkedin_id"], context=context)
if partner['linkedin_id'] and partner['linkedin_id'] != linkedin_data['id']:
partner.pop('id')
if partner['parent_id']:
partner['parent_id'] = partner['parent_id'][0]
for key, val in partner.items():
if not val:
partner.pop(key)
res.append(partner)
else:
res.append({})
return res
_columns = {
'linkedin_id': fields.char(string="LinkedIn ID"),
'linkedin_url': fields.char(string="LinkedIn url", store=True),
'linkedin_public_url': fields.function(_get_url, type='text', string="LinkedIn url",
help="This url is set automatically when you join the partner with a LinkedIn account."),
}
| agpl-3.0 |
Hybrid-Cloud/cinder | cinder/tests/unit/volume/drivers/emc/scaleio/test_create_volume.py | 5 | 5821 | # Copyright (c) 2013 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.emc import scaleio
@ddt.ddt
class TestCreateVolume(scaleio.TestScaleIODriver):
"""Test cases for ``ScaleIODriver.create_volume()``"""
def setUp(self):
"""Setup a test case environment.
Creates a fake volume object and sets up the required API responses.
"""
super(TestCreateVolume, self).setUp()
ctx = context.RequestContext('fake', 'fake', auth_token=True)
self.volume = fake_volume.fake_volume_obj(ctx)
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'types/Volume/instances/getByName::' +
self.volume.name: '"{}"'.format(self.volume.id),
'types/Volume/instances': {'id': self.volume.id},
'types/Domain/instances/getByName::' +
self.PROT_DOMAIN_NAME:
'"{}"'.format(self.PROT_DOMAIN_ID),
'types/Pool/instances/getByName::{},{}'.format(
self.PROT_DOMAIN_ID,
self.STORAGE_POOL_NAME
): '"{}"'.format(self.STORAGE_POOL_ID),
},
self.RESPONSE_MODE.Invalid: {
'types/Domain/instances/getByName::' +
self.PROT_DOMAIN_NAME: None,
'types/Pool/instances/getByName::{},{}'.format(
self.PROT_DOMAIN_ID,
self.STORAGE_POOL_NAME
): None,
},
self.RESPONSE_MODE.BadStatus: {
'types/Volume/instances': self.BAD_STATUS_RESPONSE,
'types/Domain/instances/getByName::' +
self.PROT_DOMAIN_NAME: self.BAD_STATUS_RESPONSE,
'types/Pool/instances/getByName::{},{}'.format(
self.PROT_DOMAIN_ID,
self.STORAGE_POOL_NAME
): self.BAD_STATUS_RESPONSE,
},
}
def test_no_domain(self):
"""No protection domain name or ID provided."""
self.driver.protection_domain_name = None
self.driver.protection_domain_id = None
self.assertRaises(exception.VolumeBackendAPIException,
self.test_create_volume)
def test_no_domain_id(self):
"""Only protection domain name provided."""
self.driver.protection_domain_id = None
self.driver.protection_domain_name = self.PROT_DOMAIN_NAME
self.driver.storage_pool_name = None
self.driver.storage_pool_id = self.STORAGE_POOL_ID
self.test_create_volume()
def test_no_domain_id_invalid_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_no_domain_id)
def test_no_domain_id_badstatus_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_no_domain_id)
def test_no_storage_id(self):
"""Only protection domain name provided."""
self.driver.storage_pool_id = None
self.driver.storage_pool_name = self.STORAGE_POOL_NAME
self.driver.protection_domain_id = self.PROT_DOMAIN_ID
self.driver.protection_domain_name = None
self.test_create_volume()
def test_no_storage_id_invalid_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_no_storage_id)
def test_no_storage_id_badstatus_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_no_storage_id)
def test_create_volume(self):
"""Valid create volume parameters"""
self.driver.create_volume(self.volume)
    def test_create_volume_non_8_gran(self):
        """Volume sizes are rounded up to ScaleIO's 8 GB granularity."""
        self.volume.size = 14
model_update = self.driver.create_volume(self.volume)
self.assertEqual(16, model_update['size'])
def test_create_volume_badstatus_response(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.test_create_volume)
    @ddt.data({'provisioning:type': 'thin'}, {'provisioning:type': 'thick'})
def test_create_thin_thick_volume(self, extraspecs):
self.driver._get_volumetype_extraspecs = mock.MagicMock()
self.driver._get_volumetype_extraspecs.return_value = extraspecs
self.driver.create_volume(self.volume)
def test_create_volume_bad_provisioning_type(self):
extraspecs = {'provisioning:type': 'other'}
self.driver._get_volumetype_extraspecs = mock.MagicMock()
self.driver._get_volumetype_extraspecs.return_value = extraspecs
self.assertRaises(exception.VolumeBackendAPIException,
self.test_create_volume)
| apache-2.0 |
RAJSD2610/SDNopenflowSwitchAnalysis | TotalFlowPlot.py | 1 | 2742 | import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn
seaborn.set()
path= os.path.expanduser("~/Desktop/ece671/udpt8")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
u8=[]
i=0
def file_len(fname):
    # Count the lines (flow entries) in a CSV file; an empty file counts as 0.
    i = -1
    with open(fname) as f:
        for i, l in enumerate(f):
            pass
    return i + 1
while i<(num_files/2) :
# df+=[]
j=i+1
path ="/home/vetri/Desktop/ece671/udpt8/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
u8.append(y)
i+=1
print(u8)
path= os.path.expanduser("~/Desktop/ece671/udpnone")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
u=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/udpnone/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
u.append(y)
i+=1
print(u)
path= os.path.expanduser("~/Desktop/ece671/tcpnone")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/tcpnone/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
t.append(y)
i+=1
print(t)
path= os.path.expanduser("~/Desktop/ece671/tcpt8")
num_files = len([f for f in os.listdir(path)if os.path.isfile(os.path.join(path, f))])
print(num_files)
i=0
j=0
t8=[]
while i<(num_files/2):
j=i+1
path ="/home/vetri/Desktop/ece671/tcpt8/ftotal."+str(j)+".csv"
y = file_len(path)
# except: pass
#df.append(pd.read_csv(path,header=None))
# a+=[]
#y=len(df[i].index)-1 #1 row added by default so that table has a entry
if y<0:
y=0
t8.append(y)
i+=1
print(t8)
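# One curve per capture directory: udp/tcp traffic with and without "t8" (presumably an 8 s flow idle timeout).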
#plt.figure(figsize=(4, 5))
plt.plot(list(range(1,len(u8)+1)),u8, '.-',label="udpt8")
plt.plot(list(range(1,len(u)+1)),u, '.-',label="udpnone")
plt.plot(list(range(1,len(t)+1)),t, '.-',label="tcpnone")
plt.plot(list(range(1,len(t8)+1)),t8, '.-',label="tcpt8")
plt.title("Total Flows Present after 1st flow")
plt.xlabel("time(s)")
plt.ylabel("flows")
#plt.frameon=True
plt.legend()
plt.show()
| gpl-3.0 |
Stratio/cassandra | test/system/test_thrift_server.py | 10 | 107002 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to run a single test, run from trunk/:
# PYTHONPATH=test nosetests --tests=system.test_thrift_server:TestMutations.test_empty_range
import os, sys, time, struct, uuid, re
from . import root, ThriftTester
from . import thrift_client as client
from thrift.Thrift import TApplicationException
from ttypes import *
from constants import VERSION
def _i64(n):
return struct.pack('>q', n) # big endian = network order
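# e.g. _i64(5) == '\x00\x00\x00\x00\x00\x00\x00\x05', the encoding the Long comparator expects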
_SIMPLE_COLUMNS = [Column('c1', 'value1', 0),
Column('c2', 'value2', 0)]
_SUPER_COLUMNS = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 0),
Column(_i64(6), 'value6', 0)])]
def _assert_column(column_family, key, column, value, ts = 0):
try:
assert client.get(key, ColumnPath(column_family, column=column), ConsistencyLevel.ONE).column == Column(column, value, ts)
except NotFoundException:
raise Exception('expected %s:%s:%s:%s, but was not present' % (column_family, key, column, value) )
def _assert_columnpath_exists(key, column_path):
try:
assert client.get(key, column_path, ConsistencyLevel.ONE)
except NotFoundException:
raise Exception('expected %s with %s but was not present.' % (key, column_path) )
def _assert_no_columnpath(key, column_path):
try:
client.get(key, column_path, ConsistencyLevel.ONE)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def _insert_simple(block=True):
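    # the `block` flag is vestigial: _insert_multi performs the same CL.ONE inserts either way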
return _insert_multi(['key1'])
def _insert_batch(block):
return _insert_multi_batch(['key1'], block)
def _insert_multi(keys):
CL = ConsistencyLevel.ONE
for key in keys:
client.insert(key, ColumnParent('Standard1'), Column('c1', 'value1', 0), CL)
client.insert(key, ColumnParent('Standard1'), Column('c2', 'value2', 0), CL)
def _insert_multi_batch(keys, block):
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS],
'Standard2': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]}
for key in keys:
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
def _big_slice(key, column_parent):
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
return client.get_slice(key, column_parent, p, ConsistencyLevel.ONE)
def _big_multislice(keys, column_parent):
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
return client.multiget_slice(keys, column_parent, p, ConsistencyLevel.ONE)
def _verify_batch():
_verify_simple()
L = [result.column
for result in _big_slice('key1', ColumnParent('Standard2'))]
assert L == _SIMPLE_COLUMNS, L
def _verify_simple():
assert client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE).column == Column('c1', 'value1', 0)
L = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert L == _SIMPLE_COLUMNS, L
def _insert_super(key='key1'):
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_range():
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c2', 'value2', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_counter_range():
client.add('key1', ColumnParent('Counter1'), CounterColumn('c1', 1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('Counter1'), CounterColumn('c2', 2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('Counter1'), CounterColumn('c3', 3), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_range():
p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c1'
assert result[1].column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c3'
assert result[1].column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _verify_counter_range():
p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == 'c1'
assert result[1].counter_column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == 'c3'
assert result[1].counter_column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _set_keyspace(keyspace):
client.set_keyspace(keyspace)
def _insert_super_range():
client.insert('key1', ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc3'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_counter_super_range():
client.add('key1', ColumnParent('SuperCounter1', 'sc1'), CounterColumn(_i64(4), 4), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc2'), CounterColumn(_i64(5), 5), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc2'), CounterColumn(_i64(6), 6), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc3'), CounterColumn(_i64(7), 7), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_super_range():
p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == 'sc2'
assert result[1].super_column.name == 'sc3'
p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == 'sc3'
assert result[1].super_column.name == 'sc2'
def _verify_counter_super_range():
p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
result = client.get_slice('key1', ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == 'sc2'
assert result[1].counter_super_column.name == 'sc3'
p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
result = client.get_slice('key1', ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == 'sc3'
assert result[1].counter_super_column.name == 'sc2'
def _verify_super(supercf='Super1', key='key1'):
assert client.get(key, ColumnPath(supercf, 'sc1', _i64(4)), ConsistencyLevel.ONE).column == Column(_i64(4), 'value4', 0)
slice = [result.super_column
             for result in _big_slice(key, ColumnParent(supercf))]
assert slice == _SUPER_COLUMNS, slice
def _expect_exception(fn, type_):
try:
r = fn()
except type_, t:
return t
else:
raise Exception('expected %s; got %s' % (type_.__name__, r))
def _expect_missing(fn):
_expect_exception(fn, NotFoundException)
def get_range_slice(client, parent, predicate, start, end, count, cl, row_filter=None):
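    # wrap start/end in a KeyRange; `count` bounds the number of rows returned, not columns per row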
kr = KeyRange(start, end, count=count, row_filter=row_filter)
return client.get_range_slices(parent, predicate, kr, cl)
class TestMutations(ThriftTester):
def test_insert(self):
_set_keyspace('Keyspace1')
_insert_simple(False)
time.sleep(0.1)
_verify_simple()
def test_empty_slice(self):
_set_keyspace('Keyspace1')
assert _big_slice('key1', ColumnParent('Standard2')) == []
assert _big_slice('key1', ColumnParent('Super1')) == []
def test_cas(self):
_set_keyspace('Keyspace1')
def cas(expected, updates):
return client.cas('key1', 'Standard1', expected, updates, ConsistencyLevel.SERIAL, ConsistencyLevel.QUORUM)
cas_result = cas(_SIMPLE_COLUMNS, _SIMPLE_COLUMNS)
assert not cas_result.success
assert len(cas_result.current_values) == 0, cas_result
assert cas([], _SIMPLE_COLUMNS).success
result = [cosc.column for cosc in _big_slice('key1', ColumnParent('Standard1'))]
# CAS will use its own timestamp, so we can't just compare result == _SIMPLE_COLUMNS
cas_result = cas([], _SIMPLE_COLUMNS)
assert not cas_result.success
# When we CAS for non-existence, current_values is the first live column of the row
assert dict((c.name, c.value) for c in cas_result.current_values) == { _SIMPLE_COLUMNS[0].name : _SIMPLE_COLUMNS[0].value }, cas_result
# CL.SERIAL for reads
assert client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.SERIAL).column.value == 'value1'
def test_missing_super(self):
_set_keyspace('Keyspace1')
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
_insert_super()
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
def test_count(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
assert client.get_count('key1', ColumnParent('Standard2'), p, ConsistencyLevel.ONE) == 0
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 2
assert client.get_count('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE) == 2
assert client.get_count('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE) == 2
# Let's make that a little more interesting
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c4', 'value4', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c5', 'value5', 0), ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange('c2', 'c4', False, 1000))
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 3
def test_count_paging(self):
_set_keyspace('Keyspace1')
_insert_simple()
# Exercise paging
column_parent = ColumnParent('Standard1')
super_column_parent = ColumnParent('Super1', 'sc3')
# Paging for small columns starts at 1024 columns
columns_to_insert = [Column('c%d' % (i,), 'value%d' % (i,), 0) for i in xrange(3, 1026)]
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in columns_to_insert]}
client.batch_mutate({'key1' : cfmap }, ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange('', '', False, 2000))
assert client.get_count('key1', column_parent, p, ConsistencyLevel.ONE) == 1025
# Ensure that the count limit isn't clobbered
p = SlicePredicate(slice_range=SliceRange('', '', False, 10))
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 10
# test get_count() to work correctly with 'count' settings around page size (CASSANDRA-4833)
def test_count_around_page_size(self):
def slice_predicate(count):
return SlicePredicate(slice_range=SliceRange('', '', False, count))
_set_keyspace('Keyspace1')
key = 'key1'
parent = ColumnParent('Standard1')
cl = ConsistencyLevel.ONE
for i in xrange(0, 3050):
client.insert(key, parent, Column(str(i), '', 0), cl)
# same as page size
assert client.get_count(key, parent, slice_predicate(1024), cl) == 1024
# 1 above page size
assert client.get_count(key, parent, slice_predicate(1025), cl) == 1025
        # above number of columns
assert client.get_count(key, parent, slice_predicate(4000), cl) == 3050
# same as number of columns
assert client.get_count(key, parent, slice_predicate(3050), cl) == 3050
# 1 above number of columns
assert client.get_count(key, parent, slice_predicate(3051), cl) == 3050
def test_insert_blocking(self):
_set_keyspace('Keyspace1')
_insert_simple()
_verify_simple()
def test_super_insert(self):
_set_keyspace('Keyspace1')
_insert_super()
_verify_super()
def test_super_get(self):
_set_keyspace('Keyspace1')
_insert_super()
result = client.get('key1', ColumnPath('Super1', 'sc2'), ConsistencyLevel.ONE).super_column
assert result == _SUPER_COLUMNS[1], result
def test_super_subcolumn_limit(self):
_set_keyspace('Keyspace1')
_insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1))
column_parent = ColumnParent('Super1', 'sc2')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(5), 'value5', 0)], slice
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(6), 'value6', 0)], slice
def test_long_order(self):
_set_keyspace('Keyspace1')
def long_xrange(start, stop, step):
i = start
while i < stop:
yield i
i += step
L = []
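        # steps of ~2**32/10 push names well past 2**32, so ordering must honor the full 64 bits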
for i in long_xrange(0, 104294967296, 429496729):
name = _i64(i)
client.insert('key1', ColumnParent('StandardLong1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardLong1'))]
assert slice == L, slice
def test_integer_order(self):
_set_keyspace('Keyspace1')
def long_xrange(start, stop, step):
i = start
while i >= stop:
yield i
i -= step
L = []
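        # names are inserted in descending order; the comparator should hand them back ascending (hence L.sort() below)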
for i in long_xrange(104294967296, 0, 429496729):
name = _i64(i)
client.insert('key1', ColumnParent('StandardInteger1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardInteger1'))]
L.sort()
assert slice == L, slice
def test_time_uuid(self):
import uuid
L = []
_set_keyspace('Keyspace2')
# 100 isn't enough to fail reliably if the comparator is borked
for i in xrange(500):
L.append(uuid.uuid1())
client.insert('key1', ColumnParent('Super4', 'sc1'), Column(L[-1].bytes, 'value%s' % i, i), ConsistencyLevel.ONE)
slice = _big_slice('key1', ColumnParent('Super4', 'sc1'))
assert len(slice) == 500, len(slice)
for i in xrange(500):
u = slice[i].column
assert u.value == 'value%s' % i
assert u.name == L[i].bytes
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[-1].bytes, 'value499', 499)], slice
p = SlicePredicate(slice_range=SliceRange('', L[2].bytes, False, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[0].bytes, 'value0', 0),
Column(L[1].bytes, 'value1', 1),
Column(L[2].bytes, 'value2', 2)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', True, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, 'value2', 2),
Column(L[1].bytes, 'value1', 1),
Column(L[0].bytes, 'value0', 0)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', False, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, 'value2', 2)], slice
def test_long_remove(self):
column_parent = ColumnParent('StandardLong1')
sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
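        # an empty start/finish means an unbounded slice; count=1 caps the result at a single column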
_set_keyspace('Keyspace1')
for i in xrange(10):
parent = ColumnParent('StandardLong1')
client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('StandardLong1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
def test_integer_remove(self):
column_parent = ColumnParent('StandardInteger1')
sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
_set_keyspace('Keyspace1')
for i in xrange(10):
parent = ColumnParent('StandardInteger1')
client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('StandardInteger1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
def test_batch_insert(self):
_set_keyspace('Keyspace1')
_insert_batch(False)
time.sleep(0.1)
_verify_batch()
def test_batch_insert_blocking(self):
_set_keyspace('Keyspace1')
_insert_batch(True)
_verify_batch()
def test_batch_mutate_standard_columns(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(27,32)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, 'c1', 'value1')
def test_batch_mutate_standard_columns_blocking(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(38,46)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, 'c1', 'value1')
def test_batch_mutate_remove_standard_columns(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20, predicate=SlicePredicate(column_names=[c.name]))) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_standard_row(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20))]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_super_columns_with_standard_under(self):
_set_keyspace('Keyspace1')
column_families = ['Super1', 'Super2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_super()
mutations = []
for sc in _SUPER_COLUMNS:
names = []
for c in sc.columns:
names.append(c.name)
            mutations.append(Mutation(deletion=Deletion(20, super_column=sc.name, predicate=SlicePredicate(column_names=names))))
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, super_column=sc.name, column=c.name))
def test_batch_mutate_remove_super_columns_with_none_given_underneath(self):
_set_keyspace('Keyspace1')
keys = ['key_%d' % i for i in range(17,21)]
for key in keys:
_insert_super(key)
mutations = []
for sc in _SUPER_COLUMNS:
mutations.append(Mutation(deletion=Deletion(20,
super_column=sc.name)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_remove_super_columns_entire_row(self):
_set_keyspace('Keyspace1')
keys = ['key_%d' % i for i in range(17,21)]
for key in keys:
_insert_super(key)
mutations = []
mutations.append(Mutation(deletion=Deletion(20)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_remove_slice_standard(self):
_set_keyspace('Keyspace1')
columns = [Column('c1', 'value1', 0),
Column('c2', 'value2', 0),
Column('c3', 'value3', 0),
Column('c4', 'value4', 0),
Column('c5', 'value5', 0)]
for column in columns:
client.insert('key', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
d = Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(start='c2', finish='c5')))
client.batch_mutate({'key': {'Standard1' : [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
_assert_columnpath_exists('key', ColumnPath('Standard1', column='c1'))
_assert_no_columnpath('key', ColumnPath('Standard1', column='c2'))
_assert_no_columnpath('key', ColumnPath('Standard1', column='c3'))
_assert_no_columnpath('key', ColumnPath('Standard1', column='c4'))
_assert_columnpath_exists('key', ColumnPath('Standard1', column='c5'))
def test_batch_mutate_remove_slice_of_entire_supercolumns(self):
_set_keyspace('Keyspace1')
columns = [SuperColumn(name='sc1', columns=[Column(_i64(1), 'value1', 0)]),
SuperColumn(name='sc2',
columns=[Column(_i64(2), 'value2', 0), Column(_i64(3), 'value3', 0)]),
SuperColumn(name='sc3', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc4',
columns=[Column(_i64(5), 'value5', 0), Column(_i64(6), 'value6', 0)]),
SuperColumn(name='sc5', columns=[Column(_i64(7), 'value7', 0)])]
for column in columns:
for subcolumn in column.columns:
client.insert('key', ColumnParent('Super1', column.name), subcolumn, ConsistencyLevel.ONE)
d = Deletion(1, predicate=SlicePredicate(slice_range=SliceRange(start='sc2', finish='sc5')))
client.batch_mutate({'key': {'Super1' : [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
_assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc1', column=_i64(1)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc2', column=_i64(2)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc2', column=_i64(3)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc3', column=_i64(4)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc4', column=_i64(5)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc4', column=_i64(6)))
_assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc5', column=_i64(7)))
def test_batch_mutate_remove_slice_part_of_supercolumns(self):
_set_keyspace('Keyspace1')
columns = [Column(_i64(1), 'value1', 0),
Column(_i64(2), 'value2', 0),
Column(_i64(3), 'value3', 0),
Column(_i64(4), 'value4', 0),
Column(_i64(5), 'value5', 0)]
for column in columns:
client.insert('key', ColumnParent('Super1', 'sc1'), column, ConsistencyLevel.ONE)
r = SliceRange(start=_i64(2), finish=_i64(5))
d = Deletion(1, super_column='sc1', predicate=SlicePredicate(slice_range=r))
client.batch_mutate({'key': {'Super1' : [Mutation(deletion=d)]}}, ConsistencyLevel.ONE)
_assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc1', column=_i64(1)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc1', column=_i64(2)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc1', column=_i64(3)))
_assert_no_columnpath('key', ColumnPath('Super1', super_column='sc1', column=_i64(4)))
_assert_columnpath_exists('key', ColumnPath('Super1', super_column='sc1', column=_i64(5)))
def test_batch_mutate_insertions_and_deletions(self):
_set_keyspace('Keyspace1')
first_insert = SuperColumn("sc1",
columns=[Column(_i64(20), 'value20', 3),
Column(_i64(21), 'value21', 3)])
second_insert = SuperColumn("sc1",
columns=[Column(_i64(20), 'value20', 3),
Column(_i64(21), 'value21', 3)])
first_deletion = {'super_column': "sc1",
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
second_deletion = {'super_column': "sc2",
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
keys = ['key_30', 'key_31']
for key in keys:
sc = SuperColumn('sc1',[Column(_i64(22), 'value22', 0),
Column(_i64(23), 'value23', 0)])
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=sc))]}
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
sc2 = SuperColumn('sc2', [Column(_i64(22), 'value22', 0),
Column(_i64(23), 'value23', 0)])
cfmap2 = {'Super2': [Mutation(ColumnOrSuperColumn(super_column=sc2))]}
client.batch_mutate({key: cfmap2}, ConsistencyLevel.ONE)
cfmap3 = {
'Super1' : [Mutation(ColumnOrSuperColumn(super_column=first_insert)),
Mutation(deletion=Deletion(3, **first_deletion))],
'Super2' : [Mutation(deletion=Deletion(2, **second_deletion)),
Mutation(ColumnOrSuperColumn(super_column=second_insert))]
}
keyed_mutations = dict((key, cfmap3) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for key in keys:
for c in [_i64(22), _i64(23)]:
_assert_no_columnpath(key, ColumnPath('Super1', super_column='sc1', column=c))
_assert_no_columnpath(key, ColumnPath('Super2', super_column='sc2', column=c))
for c in [_i64(20), _i64(21)]:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column='sc1', column=c))
_assert_columnpath_exists(key, ColumnPath('Super2', super_column='sc1', column=c))
def test_bad_system_calls(self):
def duplicate_index_names():
_set_keyspace('Keyspace1')
cd1 = ColumnDef('foo', 'BytesType', IndexType.KEYS, 'i')
cd2 = ColumnDef('bar', 'BytesType', IndexType.KEYS, 'i')
cf = CfDef('Keyspace1', 'BadCF', column_metadata=[cd1, cd2])
client.system_add_column_family(cf)
_expect_exception(duplicate_index_names, InvalidRequestException)
def test_bad_batch_calls(self):
# mutate_does_not_accept_cosc_and_deletion_in_same_mutation
def too_full():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
dele = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
client.batch_mutate({'key_34': {'Standard1': [Mutation(col, dele)]}},
ConsistencyLevel.ONE)
_expect_exception(too_full, InvalidRequestException)
# test_batch_mutate_does_not_accept_cosc_on_undefined_cf:
def bad_cf():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
client.batch_mutate({'key_36': {'Undefined': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# test_batch_mutate_does_not_accept_deletion_on_undefined_cf
def bad_cf():
_set_keyspace('Keyspace1')
d = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
client.batch_mutate({'key_37': {'Undefined':[Mutation(deletion=d)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# a column value that does not match the declared validator
def send_string_instead_of_long():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column('birthdate', 'bar', 0))
client.batch_mutate({'key_38': {'Indexed1': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(send_string_instead_of_long, InvalidRequestException)
def test_column_name_lengths(self):
_set_keyspace('Keyspace1')
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
client.insert('key1', ColumnParent('Standard1'), Column('x'*1, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*127, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*128, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*129, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*255, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*256, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*257, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*(2**16 - 1), 'value', 0), ConsistencyLevel.ONE)
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('x'*(2**16), 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
def test_bad_calls(self):
_set_keyspace('Keyspace1')
# missing arguments
_expect_exception(lambda: client.insert(None, None, None, None), TApplicationException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1', 'x'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# no supercolumn in a super CF
_expect_exception(lambda: client.insert('key1', ColumnParent('Super1'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# column but no supercolumn in remove
_expect_exception(lambda: client.remove('key1', ColumnPath('Super1', column='x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
# super column in non-super CF
_expect_exception(lambda: client.remove('key1', ColumnPath('Standard1', 'y', 'x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
# key too long
_expect_exception(lambda: client.get('x' * 2**16, ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
# empty key
_expect_exception(lambda: client.get('', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS]}
_expect_exception(lambda: client.batch_mutate({'': cfmap}, ConsistencyLevel.ONE), InvalidRequestException)
# empty column name
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1', column=''), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify column name
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1'), ConsistencyLevel.ONE), InvalidRequestException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1', 'x', 'y'), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify supercolumn name
_expect_exception(lambda: client.get('key1', ColumnPath('Super1'), ConsistencyLevel.ONE), InvalidRequestException)
# invalid CF
_expect_exception(lambda: get_range_slice(client, ColumnParent('S'), SlicePredicate(column_names=['', '']), '', '', 5, ConsistencyLevel.ONE), InvalidRequestException)
# 'x' is not a valid Long
_expect_exception(lambda: client.insert('key1', ColumnParent('Super1', 'sc1'), Column('x', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# start is not a valid Long
p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start is not a valid Long, supercolumn version
p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
column_parent = ColumnParent('Super1', 'sc1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, supercolumn version
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('Super1', 'sc1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, key version
_expect_exception(lambda: get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['']), 'z', 'a', 1, ConsistencyLevel.ONE), InvalidRequestException)
# ttl must be positive
column = Column('cttl1', 'value1', 0, 0)
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE),
InvalidRequestException)
# don't allow super_column in Deletion for standard ColumnFamily
deletion = Deletion(1, 'supercolumn', None)
mutation = Mutation(deletion=deletion)
mutations = {'key' : {'Standard1' : [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM),
InvalidRequestException)
# 'x' is not a valid long
deletion = Deletion(1, 'x', None)
mutation = Mutation(deletion=deletion)
mutations = {'key' : {'Super5' : [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM), InvalidRequestException)
# counters don't support ANY
_expect_exception(lambda: client.add('key1', ColumnParent('Counter1', 'x'), CounterColumn('y', 1), ConsistencyLevel.ANY), InvalidRequestException)
def test_batch_insert_super(self):
_set_keyspace('Keyspace1')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_batch_insert_super_blocking(self):
_set_keyspace('Keyspace1')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_cf_remove_column(self):
_set_keyspace('Keyspace1')
_insert_simple()
client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE))
assert client.get('key1', ColumnPath('Standard1', column='c2'), ConsistencyLevel.ONE).column \
== Column('c2', 'value2', 0)
assert _big_slice('key1', ColumnParent('Standard1')) \
== [ColumnOrSuperColumn(column=Column('c2', 'value2', 0))]
# New insert, make sure it shows up post-remove:
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
# Next, w/ a newer timestamp; it should come back:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 2), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c1', 'value1', 2), Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
def test_cf_remove(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Remove the key1:Standard1 cf; verify super is unaffected
client.remove('key1', ColumnPath('Standard1'), 3, ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Standard1')) == []
_verify_super()
# Test resurrection. First, re-insert a value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Standard1')) == []
# Next, w/ a newer timestamp; it should come back:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 4), ConsistencyLevel.ONE)
result = _big_slice('key1', ColumnParent('Standard1'))
assert result == [ColumnOrSuperColumn(column=Column('c1', 'value1', 4))], result
# check removing the entire super cf, too.
client.remove('key1', ColumnPath('Super1'), 3, ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Super1')) == []
assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
def test_super_cf_remove_and_range_slice(self):
_set_keyspace('Keyspace1')
client.insert('key3', ColumnParent('Super1', 'sc1'), Column(_i64(1), 'v1', 0), ConsistencyLevel.ONE)
client.remove('key3', ColumnPath('Super1', 'sc1'), 5, ConsistencyLevel.ONE)
rows = {}
for row in get_range_slice(client, ColumnParent('Super1'), SlicePredicate(slice_range=SliceRange('', '', False, 1000)), '', '', 1000, ConsistencyLevel.ONE):
scs = [cosc.super_column for cosc in row.columns]
rows[row.key] = scs
assert rows == {'key3': []}, rows
def test_super_cf_remove_column(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove('key1', ColumnPath('Super1', 'sc2', _i64(5)), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(6), 'value6', 0)])]
_verify_simple()
# New insert, make sure it shows up post-remove:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
super_columns_expected = [SuperColumn(name='sc1',
columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2',
columns=[Column(_i64(6), 'value6', 0), Column(_i64(7), 'value7', 0)])]
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
        assert super_columns == super_columns_expected, super_columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6),
Column(_i64(6), 'value6', 0),
Column(_i64(7), 'value7', 0)])]
assert super_columns == super_columns_expected, super_columns
# shouldn't be able to specify a column w/o a super column for remove
cp = ColumnPath(column_family='Super1', column='sc2')
e = _expect_exception(lambda: client.remove('key1', cp, 5, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("column cannot be specified without") >= 0
def test_super_cf_remove_supercolumn(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove('key1', ColumnPath('Super1', 'sc2'), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
super_columns = _big_slice('key1', ColumnParent('Super1', 'sc2'))
assert super_columns == [], super_columns
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)])]
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
_verify_simple()
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 1), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6)])]
assert super_columns == super_columns_expected, super_columns
# check slicing at the subcolumn level too
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
columns = [result.column
for result in client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)]
assert columns == [Column(_i64(5), 'value5', 6)], columns
def test_super_cf_resurrect_subcolumn(self):
_set_keyspace('Keyspace1')
key = 'vijay'
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.remove(key, ColumnPath('Super1', 'sc1'), 1, ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 2), ConsistencyLevel.ONE)
result = client.get(key, ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE)
assert result.super_column.columns is not None, result.super_column
def test_empty_range(self):
_set_keyspace('Keyspace1')
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
_insert_simple()
assert get_range_slice(client, ColumnParent('Super1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
def test_range_with_remove(self):
_set_keyspace('Keyspace1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Standard1', column='c2'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c2']), '', '', 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key='key1')], actual
def test_range_with_remove_cf(self):
_set_keyspace('Keyspace1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
client.remove('key1', ColumnPath('Standard1'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key='key1')], actual
def test_range_collation(self):
_set_keyspace('Keyspace1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '', 1000, ConsistencyLevel.ONE)
L = ['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22', '23', '24', '25', '26', '27','28', '29', '3', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '4', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '5', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '6', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '7', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '8', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '9', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', 'a', 'b']
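        # ByteOrderedPartitioner collates keys by raw bytes, so '-a' sorts before '0' and '10' before '2'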
assert len(slices) == len(L)
for key, ks in zip(L, slices):
assert key == ks.key
def test_range_partial(self):
_set_keyspace('Keyspace1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
def check_slices_against_keys(keyList, sliceList):
assert len(keyList) == len(sliceList), "%d vs %d" % (len(keyList), len(sliceList))
for key, ks in zip(keyList, sliceList):
assert key == ks.key
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), 'a', '', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['a', 'b'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '15', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '50', '51', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['50', '51'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '1', '', 10, ConsistencyLevel.ONE)
check_slices_against_keys(['1', '10', '11', '12', '13', '14', '15', '16', '17', '18'], slices)
def test_get_slice_range(self):
_set_keyspace('Keyspace1')
_insert_range()
_verify_range()
def test_get_slice_super_range(self):
_set_keyspace('Keyspace1')
_insert_super_range()
_verify_super_range()
def test_get_range_slices_tokens(self):
_set_keyspace('Keyspace2')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', 'sc1')
predicate = SlicePredicate(column_names=['col1', 'col3'])
range = KeyRange(start_token='55', end_token='55', count=100)
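        # start_token == end_token describes a wrapping range that covers the whole ring, so all 5 keys match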
result = client.get_range_slices(cp, predicate, range, ConsistencyLevel.ONE)
assert len(result) == 5
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
def test_get_range_slice_super(self):
_set_keyspace('Keyspace2')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', 'sc1')
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
cp = ColumnParent('Super3')
result = get_range_slice(client, cp, SlicePredicate(column_names=['sc1']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert list(set(row.columns[0].super_column.name for row in result))[0] == 'sc1'
def test_get_range_slice(self):
_set_keyspace('Keyspace1')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
# test empty slice
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key6', '', 1, ConsistencyLevel.ONE)
assert len(result) == 0
# test empty columns
result = get_range_slice(client, cp, SlicePredicate(column_names=['a']), 'key2', '', 1, ConsistencyLevel.ONE)
assert len(result) == 1
assert len(result[0].columns) == 0
# test column_names predicate
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3, result
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
# row limiting via count.
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 1, ConsistencyLevel.ONE)
assert len(result) == 1
# test column slice predicate
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].key == 'key1'
assert result[1].key == 'key2'
assert len(result[0].columns) == 3
assert result[0].columns[0].column.name == 'col2'
assert result[0].columns[2].column.name == 'col4'
# col limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=2)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result[0].columns) == 2
# and reversed
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col4', finish='col2', reversed=True, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert result[0].columns[0].column.name == 'col4'
assert result[0].columns[2].column.name == 'col2'
# row limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 1, ConsistencyLevel.ONE)
assert len(result) == 1
# removed data
client.remove('key1', ColumnPath('Standard1', column='col1'), 1, ConsistencyLevel.ONE)
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange('', '')), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result) == 2, result
assert result[0].columns[0].column.name == 'col2', result[0].columns[0].column.name
assert result[1].columns[0].column.name == 'col1'
def test_wrapped_range_slices(self):
_set_keyspace('Keyspace1')
def copp_token(key):
# I cheated and generated this from Java
return {'a': '00530000000100000001',
'b': '00540000000100000001',
'c': '00550000000100000001',
'd': '00560000000100000001',
'e': '00580000000100000001'}[key]
for key in ['a', 'b', 'c', 'd', 'e']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('e'), end_token=copp_token('e')), ConsistencyLevel.ONE)
assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e',], [row.key for row in result]
result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('c'), end_token=copp_token('c')), ConsistencyLevel.ONE)
assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e',], [row.key for row in result]
def test_get_slice_by_names(self):
_set_keyspace('Keyspace1')
_insert_range()
p = SlicePredicate(column_names=['c1', 'c2'])
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c1'
assert result[1].column.name == 'c2'
_insert_super()
p = SlicePredicate(column_names=[_i64(4)])
result = client.get_slice('key1', ColumnParent('Super1', 'sc1'), p, ConsistencyLevel.ONE)
assert len(result) == 1
assert result[0].column.name == _i64(4)
def test_multiget_slice(self):
"""Insert multiple keys and retrieve them using the multiget_slice interface"""
_set_keyspace('Keyspace1')
# Generate a list of 10 keys and insert them
num_keys = 10
keys = ['key'+str(i) for i in range(1, num_keys+1)]
_insert_multi(keys)
# Retrieve all 10 key slices
rows = _big_multislice(keys, ColumnParent('Standard1'))
        # multiget_slice should return a row for every requested key and nothing else
        assert sorted(rows.keys()) == sorted(keys)
columns = [ColumnOrSuperColumn(c) for c in _SIMPLE_COLUMNS]
# Validate if the returned rows have the keys requested and if the ColumnOrSuperColumn is what was inserted
for key in keys:
            assert key in rows
assert columns == rows[key]
def test_multi_count(self):
"""Insert multiple keys and count them using the multiget interface"""
_set_keyspace('Keyspace1')
        # Generate a list of 10 keys containing 1 to 10 columns and insert them
num_keys = 10
for i in range(1, num_keys+1):
key = 'key'+str(i)
for j in range(1, i+1):
client.insert(key, ColumnParent('Standard1'), Column('c'+str(j), 'value'+str(j), 0), ConsistencyLevel.ONE)
# Count columns in all 10 keys
keys = ['key'+str(i) for i in range(1, num_keys+1)]
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
counts = client.multiget_count(keys, ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
# Check the returned counts
for i in range(1, num_keys+1):
key = 'key'+str(i)
assert counts[key] == i
def test_batch_mutate_super_deletion(self):
_set_keyspace('Keyspace1')
_insert_super('test')
d = Deletion(1, predicate=SlicePredicate(column_names=['sc1']))
cfmap = {'Super1': [Mutation(deletion=d)]}
client.batch_mutate({'test': cfmap}, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE))
def test_super_reinsert(self):
_set_keyspace('Keyspace1')
for x in xrange(3):
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x), 'value', 1), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Super1'), 2, ConsistencyLevel.ONE)
for x in xrange(3):
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x + 3), 'value', 3), ConsistencyLevel.ONE)
for n in xrange(1, 4):
p = SlicePredicate(slice_range=SliceRange('', '', False, n))
slice = client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)
assert len(slice) == n, "expected %s results; found %s" % (n, slice)
def test_describe_keyspace(self):
kspaces = client.describe_keyspaces()
assert len(kspaces) == 5, kspaces # ['Keyspace2', 'Keyspace1', 'system', 'system_traces', 'system_auth']
sysks = client.describe_keyspace("system")
assert sysks in kspaces
ks1 = client.describe_keyspace("Keyspace1")
assert ks1.strategy_options['replication_factor'] == '1', ks1.strategy_options
for cf in ks1.cf_defs:
if cf.name == "Standard1":
cf0 = cf
                break
assert cf0.comparator_type == "org.apache.cassandra.db.marshal.BytesType"
def test_describe(self):
server_version = client.describe_version()
assert server_version == VERSION, (server_version, VERSION)
assert client.describe_cluster_name() == 'Test Cluster'
def test_describe_ring(self):
assert list(client.describe_ring('Keyspace1'))[0].endpoints == ['127.0.0.1']
def test_describe_token_map(self):
# test/conf/cassandra.yaml specifies org.apache.cassandra.dht.ByteOrderedPartitioner
# which uses BytesToken, so this just tests that the string representation of the token
# matches a regex pattern for BytesToken.toString().
ring = client.describe_token_map().items()
assert len(ring) == 1
token, node = ring[0]
assert re.match("[0-9A-Fa-f]{32}", token)
assert node == '127.0.0.1'
def test_describe_partitioner(self):
# Make sure this just reads back the values from the config.
assert client.describe_partitioner() == "org.apache.cassandra.dht.ByteOrderedPartitioner"
def test_describe_snitch(self):
assert client.describe_snitch() == "org.apache.cassandra.locator.SimpleSnitch"
def test_invalid_ks_names(self):
def invalid_keyspace():
client.system_add_keyspace(KsDef('in-valid', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[]))
_expect_exception(invalid_keyspace, InvalidRequestException)
def test_invalid_strategy_class(self):
def add_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKs', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(add_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def update_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKsForUpdate', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[]))
client.system_update_keyspace(KsDef('ValidKsForUpdate', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(update_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def test_invalid_cf_names(self):
def invalid_cf():
_set_keyspace('Keyspace1')
newcf = CfDef('Keyspace1', 'in-valid')
client.system_add_column_family(newcf)
_expect_exception(invalid_cf, InvalidRequestException)
def invalid_cf_inside_new_ks():
cf = CfDef('ValidKsName_invalid_cf', 'in-valid')
_set_keyspace('system')
client.system_add_keyspace(KsDef('ValidKsName_invalid_cf', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[cf]))
_expect_exception(invalid_cf_inside_new_ks, InvalidRequestException)
def test_system_cf_recreate(self):
"ensures that keyspaces and column familes can be dropped and recreated in short order"
for x in range(2):
keyspace = 'test_cf_recreate'
cf_name = 'recreate_cf'
# create
newcf = CfDef(keyspace, cf_name)
newks = KsDef(keyspace, 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[newcf])
client.system_add_keyspace(newks)
_set_keyspace(keyspace)
# insert
client.insert('key0', ColumnParent(cf_name), Column('colA', 'colA-value', 0), ConsistencyLevel.ONE)
col1 = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)[0].column
assert col1.name == 'colA' and col1.value == 'colA-value'
# drop
client.system_drop_column_family(cf_name)
# recreate
client.system_add_column_family(newcf)
# query
cosc_list = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)
# this was failing prior to CASSANDRA-1477.
assert len(cosc_list) == 0 , 'cosc length test failed'
client.system_drop_keyspace(keyspace)
def test_system_keyspace_operations(self):
# create. note large RF, this is OK
keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.SimpleStrategy',
{'replication_factor': '10'},
cf_defs=[CfDef('CreateKeyspace', 'CreateKsCf')])
client.system_add_keyspace(keyspace)
newks = client.describe_keyspace('CreateKeyspace')
assert 'CreateKsCf' in [x.name for x in newks.cf_defs]
_set_keyspace('CreateKeyspace')
# modify valid
modified_keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.OldNetworkTopologyStrategy',
{'replication_factor': '1'},
cf_defs=[])
client.system_update_keyspace(modified_keyspace)
modks = client.describe_keyspace('CreateKeyspace')
assert modks.strategy_class == modified_keyspace.strategy_class
assert modks.strategy_options == modified_keyspace.strategy_options
# drop
client.system_drop_keyspace('CreateKeyspace')
def get_second_ks():
client.describe_keyspace('CreateKeyspace')
_expect_exception(get_second_ks, NotFoundException)
def test_create_then_drop_ks(self):
keyspace = KsDef('AddThenDrop',
strategy_class='org.apache.cassandra.locator.SimpleStrategy',
strategy_options={'replication_factor':'1'},
cf_defs=[])
def test_existence():
client.describe_keyspace(keyspace.name)
_expect_exception(test_existence, NotFoundException)
client.set_keyspace('system')
client.system_add_keyspace(keyspace)
test_existence()
client.system_drop_keyspace(keyspace.name)
def test_column_validators(self):
# columndef validation for regular CF
ks = 'Keyspace1'
_set_keyspace(ks)
cd = ColumnDef('col', 'LongType', None, None)
cf = CfDef('Keyspace1', 'ValidatorColumnFamily', column_metadata=[cd])
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
cp = ColumnParent('ValidatorColumnFamily')
col0 = Column('col', _i64(42), 0)
col1 = Column('col', "ceci n'est pas 64bit", 0)
client.insert('key0', cp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert('key1', cp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef validation for super CF
scf = CfDef('Keyspace1', 'ValidatorSuperColumnFamily', column_type='Super', column_metadata=[cd])
client.system_add_column_family(scf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorSuperColumnFamily' in [x.name for x in ks_def.cf_defs]
scp = ColumnParent('ValidatorSuperColumnFamily','sc1')
client.insert('key0', scp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert('key1', scp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef and cfdef default validation
cf = CfDef('Keyspace1', 'DefaultValidatorColumnFamily', column_metadata=[cd], default_validation_class='UTF8Type')
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'DefaultValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
dcp = ColumnParent('DefaultValidatorColumnFamily')
# inserting a longtype into column 'col' is valid at the columndef level
client.insert('key0', dcp, col0, ConsistencyLevel.ONE)
# inserting a UTF8type into column 'col' fails at the columndef level
e = _expect_exception(lambda: client.insert('key1', dcp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
        # inserting a longtype into column 'fcol' should fail at the cfdef level
col2 = Column('fcol', _i64(4224), 0)
e = _expect_exception(lambda: client.insert('key1', dcp, col2, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
        # inserting a UTF8type into column 'fcol' is valid at the cfdef level
col3 = Column('fcol', "Stringin' it up in the Stringtel Stringifornia", 0)
client.insert('key0', dcp, col3, ConsistencyLevel.ONE)
def test_system_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef('ValidationColumn', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' in [x.name for x in ks1.cf_defs]
cfid = [x.id for x in ks1.cf_defs if x.name=='NewColumnFamily'][0]
# modify invalid
modified_cf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
modified_cf.id = cfid
def fail_invalid_field():
modified_cf.comparator_type = 'LongType'
client.system_update_column_family(modified_cf)
_expect_exception(fail_invalid_field, InvalidRequestException)
# modify valid
modified_cf.comparator_type = 'BytesType' # revert back to old value.
modified_cf.gc_grace_seconds = 1
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='NewColumnFamily'][0]
assert server_cf
assert server_cf.gc_grace_seconds == 1
# drop
client.system_drop_column_family('NewColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
# Make a LongType CF and add a validator
newcf = CfDef('Keyspace1', 'NewLongColumnFamily', comparator_type='LongType')
client.system_add_column_family(newcf)
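        # the comparator is LongType, so the indexed column name must be packed as a 64-bit value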
three = _i64(3)
cd = ColumnDef(three, 'LongType', None, None)
ks1 = client.describe_keyspace('Keyspace1')
modified_cf = [x for x in ks1.cf_defs if x.name=='NewLongColumnFamily'][0]
modified_cf.column_metadata = [cd]
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='NewLongColumnFamily'][0]
assert server_cf.column_metadata[0].name == _i64(3), server_cf.column_metadata
def test_dynamic_indexes_creation_deletion(self):
_set_keyspace('Keyspace1')
cfdef = CfDef('Keyspace1', 'BlankCF')
client.system_add_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name=='BlankCF'][0]
modified_cd = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, None)
modified_cf = CfDef('Keyspace1', 'BlankCF', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
# Add a second indexed CF ...
birthdate_coldef = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, None)
age_coldef = ColumnDef('age', 'BytesType', IndexType.KEYS, 'age_index')
cfdef = CfDef('Keyspace1', 'BlankCF2', column_metadata=[birthdate_coldef, age_coldef])
client.system_add_column_family(cfdef)
# ... and update it to have a third index
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF2'][0]
name_coldef = ColumnDef('name', 'BytesType', IndexType.KEYS, 'name_index')
cfdef.column_metadata.append(name_coldef)
client.system_update_column_family(cfdef)
# Now drop the indexes
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF2'][0]
birthdate_coldef = ColumnDef('birthdate', 'BytesType', None, None)
age_coldef = ColumnDef('age', 'BytesType', None, None)
name_coldef = ColumnDef('name', 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef, age_coldef, name_coldef]
client.system_update_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF'][0]
birthdate_coldef = ColumnDef('birthdate', 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef]
client.system_update_column_family(cfdef)
client.system_drop_column_family('BlankCF')
client.system_drop_column_family('BlankCF2')
def test_dynamic_indexes_with_system_update_cf(self):
_set_keyspace('Keyspace1')
cd = ColumnDef('birthdate', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'ToBeIndexed', default_validation_class='LongType', column_metadata=[cd])
client.system_add_column_family(newcf)
client.insert('key1', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(1), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('ToBeIndexed'), Column('b', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(3), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('ToBeIndexed'), Column('b', _i64(3), 0), ConsistencyLevel.ONE)
# First without index
cp = ColumnParent('ToBeIndexed')
sp = SlicePredicate(slice_range=SliceRange('', ''))
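        # with empty start and end keys, the index expression is the filter that selects matching rows across the whole range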
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
# add an index on 'birthdate'
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name=='ToBeIndexed'][0]
modified_cd = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, 'bd_index')
modified_cf = CfDef('Keyspace1', 'ToBeIndexed', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='ToBeIndexed'][0]
assert server_cf
assert server_cf.column_metadata[0].index_type == modified_cd.index_type
assert server_cf.column_metadata[0].index_name == modified_cd.index_name
# sleep a bit to give time for the index to build.
time.sleep(0.5)
# repeat query on one index expression
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
def test_system_super_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef('ValidationColumn', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewSuperColumnFamily', 'Super', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' in [x.name for x in ks1.cf_defs]
# drop
client.system_drop_column_family('NewSuperColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
def test_insert_ttl(self):
""" Test simple insertion of a column with ttl """
_set_keyspace('Keyspace1')
column = Column('cttl1', 'value1', 0, 5)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
assert client.get('key1', ColumnPath('Standard1', column='cttl1'), ConsistencyLevel.ONE).column == column
def test_simple_expiration(self):
""" Test that column ttled do expires """
_set_keyspace('Keyspace1')
column = Column('cttl3', 'value1', 0, 2)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
time.sleep(1)
c = client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE).column
assert c == column
assert client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE).column == column
time.sleep(2)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE))
def test_simple_expiration_batch_mutate(self):
""" Test that column ttled do expires using batch_mutate """
_set_keyspace('Keyspace1')
column = Column('cttl4', 'value1', 0, 2)
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(column))]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
time.sleep(1)
c = client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column
assert c == column
assert client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column == column
time.sleep(2)
        _expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE))
def test_update_expiring(self):
""" Test that updating a column with ttl override the ttl """
_set_keyspace('Keyspace1')
column1 = Column('cttl4', 'value1', 0, 1)
client.insert('key1', ColumnParent('Standard1'), column1, ConsistencyLevel.ONE)
column2 = Column('cttl4', 'value1', 1)
client.insert('key1', ColumnParent('Standard1'), column2, ConsistencyLevel.ONE)
time.sleep(1.5)
assert client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column == column2
def test_remove_expiring(self):
""" Test removing a column with ttl """
_set_keyspace('Keyspace1')
column = Column('cttl5', 'value1', 0, 10)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Standard1', column='cttl5'), 1, ConsistencyLevel.ONE)
        _expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl5'), ConsistencyLevel.ONE))
def test_describe_ring_on_invalid_keyspace(self):
def req():
client.describe_ring('system')
_expect_exception(req, InvalidRequestException)
def test_incr_decr_standard_add(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
d3 = 35
# insert positive and negative values and check the counts
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1+d2)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1+d2+d3)
def test_incr_decr_super_add(self):
_set_keyspace('Keyspace1')
d1 = -234
d2 = 52345
d3 = 3123
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c2', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
assert rv1.counter_super_column.columns[0].value == d1
assert rv1.counter_super_column.columns[1].value == d2
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get('key1', ColumnPath('SuperCounter1', 'sc1', 'c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1+d2)
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1+d2+d3)
def test_incr_standard_remove(self):
_set_keyspace('Keyspace1')
d1 = 124
# insert value and check it exists
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_super_remove(self):
_set_keyspace('Keyspace1')
d1 = 52345
# insert value and check it exists
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
def test_incr_decr_standard_remove(self):
_set_keyspace('Keyspace1')
d1 = 124
# insert value and check it exists
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_decr_super_remove(self):
_set_keyspace('Keyspace1')
d1 = 52345
# insert value and check it exists
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
def test_incr_decr_standard_batch_add(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
# insert positive and negative values and check the counts
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1+d2
def test_incr_decr_standard_batch_remove(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
# insert positive and negative values and check the counts
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1+d2
# remove the previous column and check that it is gone
update_map = {'key1': {'Counter1': [
Mutation(deletion=Deletion(predicate=SlicePredicate(column_names=['c1']))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1+d2
update_map = {'key1': {'Counter1': [
Mutation(deletion=Deletion()),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_decr_standard_slice(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
        # read the counters back and check the aggregated values
counters = client.get_slice('key1', ColumnParent('Counter1'), SlicePredicate(['c3', 'c4']), ConsistencyLevel.ONE)
assert counters[0].counter_column.value == d1+d2
assert counters[1].counter_column.value == d1
    def test_incr_decr_standard_multiget_slice(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
        # read the counters back for both keys and check the aggregated values
counters = client.multiget_slice(['key1', 'key2'], ColumnParent('Counter1'), SlicePredicate(['c3', 'c4']), ConsistencyLevel.ONE)
assert counters['key1'][0].counter_column.value == d1+d2
assert counters['key1'][1].counter_column.value == d1
assert counters['key2'][0].counter_column.value == d1+d2
assert counters['key2'][1].counter_column.value == d1
def test_counter_get_slice_range(self):
_set_keyspace('Keyspace1')
_insert_counter_range()
_verify_counter_range()
def test_counter_get_slice_super_range(self):
_set_keyspace('Keyspace1')
_insert_counter_super_range()
_verify_counter_super_range()
def test_index_scan(self):
_set_keyspace('Keyspace1')
client.insert('key1', ColumnParent('Indexed1'), Column('birthdate', _i64(1), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('Indexed1'), Column('birthdate', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('Indexed1'), Column('b', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('Indexed1'), Column('birthdate', _i64(3), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('Indexed1'), Column('b', _i64(3), 0), ConsistencyLevel.ONE)
# simple query on one index expression
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
# without index
key_range = KeyRange('', '', None, None, [IndexExpression('b', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
# but unindexed expression added to indexed one is ok
key_range = KeyRange('', '', None, None, [IndexExpression('b', IndexOperator.EQ, _i64(3)), IndexExpression('birthdate', IndexOperator.EQ, _i64(3))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key3'
assert len(result[0].columns) == 2, result[0].columns
def test_index_scan_uuid_names(self):
_set_keyspace('Keyspace1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
cp = ColumnParent('Indexed3') # timeuuid name, utf8 values
u = uuid.UUID('00000000-0000-1000-0000-000000000000').bytes
u2 = uuid.UUID('00000000-0000-1000-0000-000000000001').bytes
client.insert('key1', ColumnParent('Indexed3'), Column(u, 'a', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Indexed3'), Column(u2, 'b', 0), ConsistencyLevel.ONE)
# name comparator + data validator of incompatible types -- see CASSANDRA-2347
key_range = KeyRange('', '', None, None, [IndexExpression(u, IndexOperator.EQ, 'a'), IndexExpression(u2, IndexOperator.EQ, 'b')], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
cp = ColumnParent('Indexed2') # timeuuid name, long values
# name must be valid (TimeUUID)
key_range = KeyRange('', '', None, None, [IndexExpression('foo', IndexOperator.EQ, uuid.UUID('00000000-0000-1000-0000-000000000000').bytes)], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
# value must be valid (TimeUUID)
key_range = KeyRange('', '', None, None, [IndexExpression(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, IndexOperator.EQ, "foo")], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
def test_index_scan_expiring(self):
""" Test that column ttled expires from KEYS index"""
_set_keyspace('Keyspace1')
client.insert('key1', ColumnParent('Indexed1'), Column('birthdate', _i64(1), 0, 1), ConsistencyLevel.ONE)
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
# query before expiration
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
# wait for expiration and requery
time.sleep(2)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
def test_column_not_found_quorum(self):
_set_keyspace('Keyspace1')
key = 'doesntexist'
column_path = ColumnPath(column_family="Standard1", column="idontexist")
try:
client.get(key, column_path, ConsistencyLevel.QUORUM)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def test_get_range_slice_after_deletion(self):
_set_keyspace('Keyspace2')
key = 'key1'
        # three supercolumns, each with a "col1" subcolumn
for i in range(1,4):
client.insert(key, ColumnParent('Super3', 'sc%d' % i), Column('col1', 'val1', 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3')
predicate = SlicePredicate(slice_range=SliceRange('sc1', 'sc3', False, count=1))
k_range = KeyRange(start_key=key, end_key=key, count=1)
# validate count=1 restricts to 1 supercolumn
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
# remove sc1; add back subcolumn to override tombstone
client.remove(key, ColumnPath('Super3', 'sc1'), 1, ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
client.insert(key, ColumnParent('Super3', 'sc1'), Column('col1', 'val1', 2), ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1, result[0].columns
assert result[0].columns[0].super_column.name == 'sc1'
class TestTruncate(ThriftTester):
def test_truncate(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# truncate Standard1
client.truncate('Standard1')
assert _big_slice('key1', ColumnParent('Standard1')) == []
# truncate Super1
client.truncate('Super1')
assert _big_slice('key1', ColumnParent('Super1')) == []
assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
| apache-2.0 |
mattskone/garage-alarm | exploring/iso_ss.py | 2 | 1392 | """
Utility script for experimenting with different ISO and shutter speeds.
"""
from fractions import Fraction
import sys
import time
from picamera import PiCamera
def get_camera():
camera = PiCamera()
camera.hflip = True
camera.vflip = True
return camera
def custom():
c = get_camera()
iso = int(sys.argv[1])
ss = int(sys.argv[2])
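    # picamera shutter speed is expressed in microseconds; pinning the
    # framerate to 1e6/ss frames per second makes the frame period match the
    # exposure time, which is what allows long exposures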
c.framerate = Fraction(1000000, ss)
c.ss = ss
c.iso = iso
print 'Capturing with ISO {0}, shutter speed {1}, and frame rate {2}'.format(iso, ss, c.framerate)
time.sleep(10)
c.exposure_mode = 'off'
c.capture('temp/temp.jpg')
c.close()
def main():
iso_list = [100, 200, 400, 800]
ss_list = [50000, 500000, 2500000, 5000000]
for iso in iso_list:
for ss in ss_list:
c = get_camera()
c.framerate = Fraction(1000000, ss)
c.ss = ss
c.iso = iso
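            # give the auto-gain and white balance time to settle before
            # locking the exposure with exposure_mode = 'off'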
time.sleep(10)
c.exposure_mode = 'off'
print 'Capturing with ISO {0}, shutter speed {1}, and frame rate {2}'.format(iso, ss, c.framerate)
print 'Gains: {0}'.format(c.awb_gains)
filename = 'test_{0}_{1}_{2}.jpg'.format(iso, ss, float(c.framerate))
c.capture('temp/' + filename)
c.close()
if __name__ == '__main__':
if len(sys.argv) > 1:
custom()
else:
main()
print 'Done'
| mit |
TRESCLOUD/odoopub | addons/l10n_be/wizard/l10n_be_vat_intra.py | 332 | 14728 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# Adapted by Noviat to
# - make the 'mand_id' field optional
# - support Noviat tax code scheme
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import base64
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.report import report_sxw
class partner_vat_intra(osv.osv_memory):
"""
Partner Vat Intra
"""
_name = "partner.vat.intra"
_description = 'Partner VAT Intra'
def _get_xml_data(self, cr, uid, context=None):
if context.get('file_save', False):
return base64.encodestring(context['file_save'].encode('utf8'))
return ''
def _get_europe_country(self, cursor, user, context=None):
return self.pool.get('res.country').search(cursor, user, [('code', 'in', ['AT', 'BG', 'CY', 'CZ', 'DK', 'EE', 'FI', 'FR', 'DE', 'GR', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU', 'MT', 'NL', 'PL', 'PT', 'RO', 'SK', 'SI', 'ES', 'SE', 'GB'])])
_columns = {
'name': fields.char('File Name'),
'period_code': fields.char('Period Code', size=6, required=True, help='''This is where you have to set the period code for the intracom declaration using the format: ppyyyy
PP can stand for a month: from '01' to '12'.
PP can stand for a trimester: '31','32','33','34'
The first figure means that it is a trimester,
The second figure identifies the trimester.
PP can stand for a complete fiscal year: '00'.
YYYY stands for the year (4 positions).
'''
),
'period_ids': fields.many2many('account.period', 'account_period_rel', 'acc_id', 'period_id', 'Period (s)', help = 'Select here the period(s) you want to include in your intracom declaration'),
'tax_code_id': fields.many2one('account.tax.code', 'Company', domain=[('parent_id', '=', False)], help="Keep empty to use the user's company", required=True),
'test_xml': fields.boolean('Test XML file', help="Sets the XML output as test file"),
'mand_id' : fields.char('Reference', help="Reference given by the Representative of the sending company."),
'msg': fields.text('File created', readonly=True),
'no_vat': fields.text('Partner With No VAT', readonly=True, help="The Partner whose VAT number is not defined and they are not included in XML File."),
'file_save' : fields.binary('Save File', readonly=True),
'country_ids': fields.many2many('res.country', 'vat_country_rel', 'vat_id', 'country_id', 'European Countries'),
'comments': fields.text('Comments'),
}
def _get_tax_code(self, cr, uid, context=None):
obj_tax_code = self.pool.get('account.tax.code')
obj_user = self.pool.get('res.users')
company_id = obj_user.browse(cr, uid, uid, context=context).company_id.id
tax_code_ids = obj_tax_code.search(cr, uid, [('company_id', '=', company_id), ('parent_id', '=', False)], context=context)
return tax_code_ids and tax_code_ids[0] or False
_defaults = {
'country_ids': _get_europe_country,
'file_save': _get_xml_data,
'name': 'vat_intra.xml',
'tax_code_id': _get_tax_code,
}
def _get_datas(self, cr, uid, ids, context=None):
"""Collects require data for vat intra xml
:param ids: id of wizard.
:return: dict of all data to be used to generate xml for Partner VAT Intra.
:rtype: dict
"""
if context is None:
context = {}
obj_user = self.pool.get('res.users')
obj_sequence = self.pool.get('ir.sequence')
obj_partner = self.pool.get('res.partner')
xmldict = {}
post_code = street = city = country = data_clientinfo = ''
seq = amount_sum = 0
wiz_data = self.browse(cr, uid, ids[0], context=context)
comments = wiz_data.comments
if wiz_data.tax_code_id:
data_company = wiz_data.tax_code_id.company_id
else:
data_company = obj_user.browse(cr, uid, uid, context=context).company_id
# Get Company vat
company_vat = data_company.partner_id.vat
if not company_vat:
raise osv.except_osv(_('Insufficient Data!'),_('No VAT number associated with your company.'))
company_vat = company_vat.replace(' ','').upper()
issued_by = company_vat[:2]
if len(wiz_data.period_code) != 6:
raise osv.except_osv(_('Error!'), _('Period code is not valid.'))
if not wiz_data.period_ids:
raise osv.except_osv(_('Insufficient Data!'),_('Please select at least one Period.'))
p_id_list = obj_partner.search(cr, uid, [('vat','!=',False)], context=context)
if not p_id_list:
            raise osv.except_osv(_('Insufficient Data!'), _('No partner has a VAT number associated with them.'))
seq_declarantnum = obj_sequence.get(cr, uid, 'declarantnum')
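        # declarant number: the numeric part of the company VAT followed by the last four digits of the sequence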
dnum = company_vat[2:] + seq_declarantnum[-4:]
addr = obj_partner.address_get(cr, uid, [data_company.partner_id.id], ['invoice'])
email = data_company.partner_id.email or ''
phone = data_company.partner_id.phone or ''
if addr.get('invoice',False):
ads = obj_partner.browse(cr, uid, [addr['invoice']])[0]
city = (ads.city or '')
post_code = (ads.zip or '')
if ads.street:
street = ads.street
if ads.street2:
street += ' '
street += ads.street2
if ads.country_id:
country = ads.country_id.code
if not country:
country = company_vat[:2]
if not email:
raise osv.except_osv(_('Insufficient Data!'),_('No email address associated with the company.'))
if not phone:
raise osv.except_osv(_('Insufficient Data!'),_('No phone associated with the company.'))
xmldict.update({
'company_name': data_company.name,
'company_vat': company_vat,
'vatnum': company_vat[2:],
'mand_id': wiz_data.mand_id,
'sender_date': str(time.strftime('%Y-%m-%d')),
'street': street,
'city': city,
'post_code': post_code,
'country': country,
'email': email,
'phone': phone.replace('/','').replace('.','').replace('(','').replace(')','').replace(' ',''),
'period': wiz_data.period_code,
'clientlist': [],
'comments': comments,
'issued_by': issued_by,
})
        # tax code 44: services
        # tax code 46L: normal goods deliveries
        # tax code 46T: ABC goods deliveries
        # tax code 48xxx: credit note on tax code xxx
codes = ('44', '46L', '46T', '48s44', '48s46L', '48s46T')
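        # sum amounts per partner and intra code, folding the 48xxx credit-note codes back into their base code with negated amounts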
cr.execute('''SELECT p.name As partner_name, l.partner_id AS partner_id, p.vat AS vat,
(CASE WHEN t.code = '48s44' THEN '44'
WHEN t.code = '48s46L' THEN '46L'
WHEN t.code = '48s46T' THEN '46T'
ELSE t.code END) AS intra_code,
SUM(CASE WHEN t.code in ('48s44','48s46L','48s46T') THEN -l.tax_amount ELSE l.tax_amount END) AS amount
FROM account_move_line l
LEFT JOIN account_tax_code t ON (l.tax_code_id = t.id)
LEFT JOIN res_partner p ON (l.partner_id = p.id)
WHERE t.code IN %s
AND l.period_id IN %s
AND t.company_id = %s
GROUP BY p.name, l.partner_id, p.vat, intra_code''', (codes, tuple([p.id for p in wiz_data.period_ids]), data_company.id))
p_count = 0
for row in cr.dictfetchall():
if not row['vat']:
row['vat'] = ''
p_count += 1
seq += 1
amt = row['amount'] or 0.0
amount_sum += amt
intra_code = row['intra_code'] == '44' and 'S' or (row['intra_code'] == '46L' and 'L' or (row['intra_code'] == '46T' and 'T' or ''))
xmldict['clientlist'].append({
'partner_name': row['partner_name'],
'seq': seq,
'vatnum': row['vat'][2:].replace(' ','').upper(),
'vat': row['vat'],
'country': row['vat'][:2],
'amount': round(amt,2),
'intra_code': row['intra_code'],
'code': intra_code})
xmldict.update({'dnum': dnum, 'clientnbr': str(seq), 'amountsum': round(amount_sum,2), 'partner_wo_vat': p_count})
return xmldict
def create_xml(self, cursor, user, ids, context=None):
"""Creates xml that is to be exported and sent to estate for partner vat intra.
:return: Value for next action.
:rtype: dict
"""
mod_obj = self.pool.get('ir.model.data')
xml_data = self._get_datas(cursor, user, ids, context=context)
month_quarter = xml_data['period'][:2]
year = xml_data['period'][2:]
data_file = ''
# Can't we do this by etree?
data_head = """<?xml version="1.0" encoding="ISO-8859-1"?>
<ns2:IntraConsignment xmlns="http://www.minfin.fgov.be/InputCommon" xmlns:ns2="http://www.minfin.fgov.be/IntraConsignment" IntraListingsNbr="1">
<ns2:Representative>
<RepresentativeID identificationType="NVAT" issuedBy="%(issued_by)s">%(vatnum)s</RepresentativeID>
<Name>%(company_name)s</Name>
<Street>%(street)s</Street>
<PostCode>%(post_code)s</PostCode>
<City>%(city)s</City>
<CountryCode>%(country)s</CountryCode>
<EmailAddress>%(email)s</EmailAddress>
<Phone>%(phone)s</Phone>
</ns2:Representative>""" % (xml_data)
if xml_data['mand_id']:
data_head += '\n\t\t<ns2:RepresentativeReference>%(mand_id)s</ns2:RepresentativeReference>' % (xml_data)
data_comp_period = '\n\t\t<ns2:Declarant>\n\t\t\t<VATNumber>%(vatnum)s</VATNumber>\n\t\t\t<Name>%(company_name)s</Name>\n\t\t\t<Street>%(street)s</Street>\n\t\t\t<PostCode>%(post_code)s</PostCode>\n\t\t\t<City>%(city)s</City>\n\t\t\t<CountryCode>%(country)s</CountryCode>\n\t\t\t<EmailAddress>%(email)s</EmailAddress>\n\t\t\t<Phone>%(phone)s</Phone>\n\t\t</ns2:Declarant>' % (xml_data)
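        # period code 'ppyyyy': '31'-'34' selects a quarter, '00' the whole fiscal year, anything else a month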
if month_quarter.startswith('3'):
data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Quarter>'+month_quarter[1]+'</ns2:Quarter> \n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
elif month_quarter.startswith('0') and month_quarter.endswith('0'):
data_comp_period+= '\n\t\t<ns2:Period>\n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
else:
data_comp_period += '\n\t\t<ns2:Period>\n\t\t\t<ns2:Month>'+month_quarter+'</ns2:Month> \n\t\t\t<ns2:Year>'+year+'</ns2:Year>\n\t\t</ns2:Period>'
data_clientinfo = ''
for client in xml_data['clientlist']:
if not client['vatnum']:
raise osv.except_osv(_('Insufficient Data!'),_('No vat number defined for %s.') % client['partner_name'])
data_clientinfo +='\n\t\t<ns2:IntraClient SequenceNumber="%(seq)s">\n\t\t\t<ns2:CompanyVATNumber issuedBy="%(country)s">%(vatnum)s</ns2:CompanyVATNumber>\n\t\t\t<ns2:Code>%(code)s</ns2:Code>\n\t\t\t<ns2:Amount>%(amount).2f</ns2:Amount>\n\t\t</ns2:IntraClient>' % (client)
data_decl = '\n\t<ns2:IntraListing SequenceNumber="1" ClientsNbr="%(clientnbr)s" DeclarantReference="%(dnum)s" AmountSum="%(amountsum).2f">' % (xml_data)
data_file += data_head + data_decl + data_comp_period + data_clientinfo + '\n\t\t<ns2:Comment>%(comments)s</ns2:Comment>\n\t</ns2:IntraListing>\n</ns2:IntraConsignment>' % (xml_data)
context = dict(context or {})
context['file_save'] = data_file
model_data_ids = mod_obj.search(cursor, user,[('model','=','ir.ui.view'),('name','=','view_vat_intra_save')], context=context)
resource_id = mod_obj.read(cursor, user, model_data_ids, fields=['res_id'], context=context)[0]['res_id']
return {
'name': _('Save'),
'context': context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'partner.vat.intra',
'views': [(resource_id,'form')],
'view_id': 'view_vat_intra_save',
'type': 'ir.actions.act_window',
'target': 'new',
}
def preview(self, cr, uid, ids, context=None):
xml_data = self._get_datas(cr, uid, ids, context=context)
datas = {
'ids': [],
'model': 'partner.vat.intra',
'form': xml_data
}
return self.pool['report'].get_action(
cr, uid, [], 'l10n_be.report_l10nvatintraprint', data=datas, context=context
)
class vat_intra_print(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(vat_intra_print, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
})
class wrapped_vat_intra_print(osv.AbstractModel):
_name = 'report.l10n_be.report_l10nvatintraprint'
_inherit = 'report.abstract_report'
_template = 'l10n_be.report_l10nvatintraprint'
_wrapped_report_class = vat_intra_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
CatsAndDogsbvba/odoo | addons/project/res_config.py | 232 | 4551 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_configuration(osv.osv_memory):
_name = 'project.config.settings'
_inherit = 'res.config.settings'
_columns = {
'module_sale_service': fields.boolean('Generate tasks from sale orders',
help='This feature automatically creates project tasks from service products in sale orders. '
'More precisely, tasks are created for procurement lines with product of type \'Service\', '
'procurement method \'Make to Order\', and supply method \'Manufacture\'.\n'
'-This installs the module sale_service.'),
'module_pad': fields.boolean("Use integrated collaborative note pads on task",
help='Lets the company customize which Pad installation should be used to link to new pads '
'(for example: http://ietherpad.com/).\n'
'-This installs the module pad.'),
'module_project_timesheet': fields.boolean("Record timesheet lines per tasks",
help='This allows you to transfer the entries under tasks defined for Project Management to '
'the timesheet line entries for particular date and user, with the effect of creating, '
'editing and deleting either ways.\n'
'-This installs the module project_timesheet.'),
'module_project_issue': fields.boolean("Track issues and bugs",
help='Provides management of issues/bugs in projects.\n'
'-This installs the module project_issue.'),
'time_unit': fields.many2one('product.uom', 'Working time unit', required=True,
help='This will set the unit of measure used in projects and tasks.\n'
'Changing the unit will only impact new entries.'),
'module_project_issue_sheet': fields.boolean("Invoice working time on issues",
help='Provides timesheet support for the issues/bugs management in project.\n'
'-This installs the module project_issue_sheet.'),
'group_tasks_work_on_tasks': fields.boolean("Log work activities on tasks",
implied_group='project.group_tasks_work_on_tasks',
help="Allows you to compute work on tasks."),
'group_time_work_estimation_tasks': fields.boolean("Manage time estimation on tasks",
implied_group='project.group_time_work_estimation_tasks',
help="Allows you to compute Time Estimation on tasks."),
'group_manage_delegation_task': fields.boolean("Allow task delegation",
implied_group='project.group_delegate_task',
help="Allows you to delegate tasks to other users."),
}
def get_default_time_unit(self, cr, uid, fields, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return {'time_unit': user.company_id.project_time_mode_id.id}
def set_time_unit(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context)
user = self.pool.get('res.users').browse(cr, uid, uid, context)
user.company_id.write({'project_time_mode_id': config.time_unit.id})
def onchange_time_estimation_project_timesheet(self, cr, uid, ids, group_time_work_estimation_tasks, module_project_timesheet):
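        # time estimation and timesheet sync both rely on logging work on tasks, so enable that group as well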
if group_time_work_estimation_tasks or module_project_timesheet:
return {'value': {'group_tasks_work_on_tasks': True}}
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
codegooglecom/jaikuengine | common/test/sms.py | 34 | 3749 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from django.conf import settings
from django.core import mail
from common import api
from common import clean
from common import exception
from common import profile
from common import sms as sms_service
from common.protocol import sms
from common.test import base
from common.test import util as test_util
class SmsTest(base.FixturesTestCase):
sender = '+14084900694'
target = settings.SMS_TARGET
def setUp(self):
super(SmsTest, self).setUp()
    # this is actually a TestSmsConnection instance, overridden by the base
    # classes
self.service = sms_service.SmsService(sms.SmsConnection())
self.service.init_handlers()
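  # deliver an inbound SMS, drain the task queue, and return a copy of the messages sent in response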
def receive(self, message, sender=None, target=None):
if sender is None:
sender = self.sender
if target is None:
target = self.target
self.service.handle_message(sender, target, message)
self.exhaust_queue_any()
outbox = sms.outbox[:]
sms.outbox = []
return outbox
def assertOutboxContains(self, outbox, pattern, sender=None):
if sender is None:
sender = self.sender
if type(pattern) is type(''):
pattern = re.compile(pattern)
for mobile, message in outbox:
if mobile == sender and pattern.search(message):
return True
self.fail('Not in outbox: /%s/ \n %s' % (pattern.pattern, outbox))
def sign_in(self, nick, sender=None):
password = self.passwords[clean.nick(nick)]
r = self.receive('SIGN IN %s %s' % (nick, password), sender=sender)
return r
# Note: all of these tests assume that double opt-in for the user
# has already been completed
def test_sign_in(self):
nick = 'popular'
password = self.passwords[clean.nick(nick)]
r = self.receive('SIGN IN %s %s' % (nick, password))
self.assertOutboxContains(r, 'Welcome to %s SMS %s' % (settings.SITE_NAME, nick))
def test_sign_on(self):
self.sign_in('popular')
r = self.receive('SIGN OUT')
self.assertOutboxContains(r, sms_service.HELP_SIGNED_OUT)
r = self.receive('SIGN OUT')
self.assertOutboxContains(r, sms_service.HELP_SIGN_IN)
def test_post_and_reply(self):
unpop = '+14083839393'
r = self.sign_in('unpopular', sender=unpop)
r = self.receive('on', sender=unpop)
r = self.sign_in('popular')
r = self.receive('on')
r = self.receive('bling blao')
self.assertOutboxContains(r, 'popular: bling blao', sender=unpop)
r = self.receive('@popular: sup dawg', sender=unpop)
self.assertOutboxContains(r, 'unpopular\^bb: sup dawg')
def test_whitelist(self):
o = test_util.override(SMS_MT_WHITELIST=re.compile('\+23'))
def _all_blocked():
r = self.sign_in('popular')
self.assertRaises(exception.ServiceError, _all_blocked)
r = self.sign_in('popular', '+2345678900')
self.assert_(r)
o.reset()
def test_blacklist(self):
o = test_util.override(SMS_MT_BLACKLIST=re.compile('\+1'))
def _all_blocked():
r = self.sign_in('popular')
self.assertRaises(exception.ServiceError, _all_blocked)
r = self.sign_in('popular', '+2345678900')
self.assert_(r)
o.reset()
| apache-2.0 |
avanov/django | django/utils/_os.py | 502 | 3581 | from __future__ import unicode_literals
import os
import sys
import tempfile
from os.path import abspath, dirname, isabs, join, normcase, normpath, sep
from django.core.exceptions import SuspiciousFileOperation
from django.utils import six
from django.utils.encoding import force_text
if six.PY2:
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
# Under Python 2, define our own abspath function that can handle joining
# unicode paths to a current working directory that has non-ASCII characters
# in it. This isn't necessary on Windows since the Windows version of abspath
# handles this correctly. It also handles drive letters differently than the
# pure Python implementation, so it's best not to replace it.
if six.PY3 or os.name == 'nt':
abspathu = abspath
else:
def abspathu(path):
"""
Version of os.path.abspath that uses the unicode representation
of the current working directory, thus avoiding a UnicodeDecodeError
in join when the cwd has non-ASCII characters.
"""
if not isabs(path):
path = join(os.getcwdu(), path)
return normpath(path)
def upath(path):
"""
Always return a unicode path.
"""
if six.PY2 and not isinstance(path, six.text_type):
return path.decode(fs_encoding)
return path
def npath(path):
"""
Always return a native path, that is unicode on Python 3 and bytestring on
Python 2.
"""
if six.PY2 and not isinstance(path, bytes):
return path.encode(fs_encoding)
return path
def safe_join(base, *paths):
"""
Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
"""
base = force_text(base)
paths = [force_text(p) for p in paths]
final_path = abspathu(join(base, *paths))
base_path = abspathu(base)
# Ensure final_path starts with base_path (using normcase to ensure we
# don't false-negative on case insensitive operating systems like Windows),
# further, one of the following conditions must be true:
# a) The next character is the path separator (to prevent conditions like
# safe_join("/dir", "/../d"))
# b) The final path must be the same as the base path.
# c) The base path must be the most root path (meaning either "/" or "C:\\")
if (not normcase(final_path).startswith(normcase(base_path + sep)) and
normcase(final_path) != normcase(base_path) and
dirname(normcase(base_path)) != normcase(base_path)):
raise SuspiciousFileOperation(
'The joined path ({}) is located outside of the base path '
'component ({})'.format(final_path, base_path))
return final_path
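# A minimal usage sketch for safe_join (illustrative only; the POSIX-style
# paths below are hypothetical and this snippet is not executed on import):
#
#   >>> safe_join('/var/www', 'static', 'logo.png')
#   '/var/www/static/logo.png'
#   >>> safe_join('/var/www', '../etc/passwd')
#   Traceback (most recent call last):
#       ...
#   SuspiciousFileOperation: The joined path (/etc/passwd) is located
#   outside of the base path component (/var/www)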
def symlinks_supported():
"""
    A function to check if creating symlinks is supported on the
host platform and/or if they are allowed to be created (e.g.
on Windows it requires admin permissions).
"""
tmpdir = tempfile.mkdtemp()
original_path = os.path.join(tmpdir, 'original')
symlink_path = os.path.join(tmpdir, 'symlink')
os.makedirs(original_path)
try:
os.symlink(original_path, symlink_path)
supported = True
except (OSError, NotImplementedError, AttributeError):
supported = False
else:
os.remove(symlink_path)
finally:
os.rmdir(original_path)
os.rmdir(tmpdir)
return supported
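# Hedged usage sketch: callers would typically branch on the result, e.g.
# (src/dst are hypothetical paths; a shutil import is assumed by the caller):
#
#   if symlinks_supported():
#       os.symlink(src, dst)
#   else:
#       shutil.copytree(src, dst)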
| bsd-3-clause |
thjashin/tensorflow | tensorflow/python/framework/common_shapes.py | 40 | 26355 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of common shape functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six.moves
from tensorflow.core.framework import types_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import cpp_shape_inference_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
def scalar_shape(unused_op):
"""Shape function for ops that output a scalar value."""
return [tensor_shape.scalar()]
def unchanged_shape(op):
"""Shape function for ops that output an tensor like their first input."""
return [op.inputs[0].get_shape()]
def unchanged_shape_with_rank(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: The exact rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_least(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: A lower bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_least(rank)]
return _ShapeFunction
def unchanged_shape_with_rank_at_most(rank):
"""Returns a shape function for ops that constrain the rank of their input.
Args:
rank: An upper bound on the rank of the input and output.
Returns:
A shape function for ops that output a tensor of the same size as their
input, with a particular rank.
"""
def _ShapeFunction(op):
return [op.inputs[0].get_shape().with_rank_at_most(rank)]
return _ShapeFunction
def matmul_shape(op):
"""Shape function for a MatMul op."""
a_shape = op.inputs[0].get_shape().with_rank(2)
transpose_a = op.get_attr("transpose_a")
b_shape = op.inputs[1].get_shape().with_rank(2)
transpose_b = op.get_attr("transpose_b")
output_rows = a_shape[1] if transpose_a else a_shape[0]
output_cols = b_shape[0] if transpose_b else b_shape[1]
inner_a = a_shape[0] if transpose_a else a_shape[1]
inner_b = b_shape[1] if transpose_b else b_shape[0]
inner_a.assert_is_compatible_with(inner_b)
return [tensor_shape.TensorShape([output_rows, output_cols])]
def get_conv_output_size(input_size, filter_size, strides, padding_type):
"""Returns the spatial size of a n-d convolution/pooling output."""
input_size = tuple([tensor_shape.as_dimension(x).value for x in input_size])
filter_size = tuple([tensor_shape.as_dimension(x).value for x in filter_size])
strides = [int(x) for x in strides]
if all(x == 1 for x in input_size) and all(x == 1 for x in filter_size):
return input_size
if any(x is not None and y is not None and x > y for x, y in
zip(filter_size, input_size)):
raise ValueError("Filter must not be larger than the input: "
"Filter: %r Input: %r" % (filter_size, input_size))
if padding_type == b"VALID":
def _valid(in_dim, k_dim, s_dim):
if in_dim is not None and k_dim is not None:
return (in_dim - k_dim + s_dim) // s_dim
else:
return None
output_size = [
_valid(in_dim, k_dim, s_dim)
for in_dim, k_dim, s_dim in zip(input_size, filter_size, strides)
]
elif padding_type == b"SAME":
def _same(in_dim, s_dim):
if in_dim is not None:
return (in_dim + s_dim - 1) // s_dim
else:
return None
output_size = [_same(in_dim, s_dim)
for in_dim, s_dim in zip(input_size, strides)]
else:
raise ValueError("Invalid padding: %r" % padding_type)
return tuple(output_size)
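# Worked example of the padding arithmetic above (values are illustrative,
# following the VALID formula (in - k + s) // s and the SAME formula
# (in + s - 1) // s applied per spatial dimension):
#
#   >>> get_conv_output_size((5, 5), (3, 3), [2, 2], b"VALID")
#   (2, 2)
#   >>> get_conv_output_size((5, 5), (3, 3), [2, 2], b"SAME")
#   (3, 3)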
def get2d_conv_output_size(input_height, input_width, filter_height,
filter_width, row_stride, col_stride, padding_type):
"""Returns the number of rows and columns in a convolution/pooling output."""
return get_conv_output_size((input_height, input_width),
(filter_height, filter_width),
(row_stride, col_stride), padding_type)
def conv2d_shape(op):
"""Shape function for a Conv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A Conv2D Operation.
Returns:
A list containing the Shape of the Conv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
if data_format == b"NCHW":
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth_out]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
def depthwise_conv2d_native_shape(op):
"""Shape function for a DepthwiseConv2D op.
This op has two inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* filter, a 4D tensor with shape = [filter_rows, filter_cols,
depth_in, depthwise_multiplier]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_in*depthwise_multiplier], where out_rows and out_cols depend
on the value of the op's "padding" and "strides" attrs.
Args:
op: A DepthwiseConv2dNative Operation.
Returns:
A list containing the Shape of the DepthwiseConv2DNative output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
filter_shape = op.inputs[1].get_shape().with_rank(4)
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = filter_shape[0]
filter_cols = filter_shape[1]
depth_out = filter_shape[3] * filter_shape[2]
# Check that the input depths are compatible.
input_shape[3].assert_is_compatible_with(filter_shape[2])
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride, stride,
padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def separable_conv2d_shape(op):
"""Shape function for a SeparableConv2D op.
This op has three inputs:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
* depthwise_filter, a 4D tensor with shape = [filter_rows,
filter_cols, depth_in, depth_multiplier]
* pointwise_filter, a 4D tensor with shape = [1, 1, depth_in *
depth_multiplier, depth_out]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "padding" and "strides" attrs.
Args:
op: A SeparableConv2D Operation.
Returns:
A list containing the Shape of the SeparableConv2D output.
Raises:
ValueError: If the shapes of the input or filter are incompatible.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
depthwise_filter_shape = op.inputs[1].get_shape().merge_with(
tensor_shape.TensorShape([None, None, input_shape[3], None]))
pointwise_depth_in = depthwise_filter_shape[2] * depthwise_filter_shape[3]
pointwise_filter_shape = op.inputs[2].get_shape().merge_with(
tensor_shape.TensorShape([1, 1, pointwise_depth_in, None]))
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
filter_rows = depthwise_filter_shape[0]
filter_cols = depthwise_filter_shape[1]
depth_out = pointwise_filter_shape[3]
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not yet support "
"strides in the batch and depth dimensions.")
if stride_r != stride_c:
# TODO(shlens): Add support for this.
raise ValueError("Current implementation only supports equal length "
"strides in the row and column dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
stride = stride_r
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,
filter_cols, stride, stride,
padding)
return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]
def avg_pool_shape(op):
"""Shape function for an AvgPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows and out_cols depend on the
value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: An AvgPool Operation.
Returns:
A single-element list containing the Shape of the AvgPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
if data_format == b"NCHW":
ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1 or ksize_d != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch and depth dimensions.")
if stride_b != 1 or stride_d != 1:
raise ValueError("Current implementation does not support strides "
"in the batch and depth dimensions.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
ksize_c, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
def max_pool_shape(op):
"""Shape function for a MaxPool op.
This op has one input:
* input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]
The output is a 4D tensor with shape = [batch_size, out_rows,
out_cols, depth_out], where out_rows, out_cols, and depth_out depend
on the value of the op's "ksize", "strides", and "padding" attrs.
Args:
op: A MaxPool Operation.
Returns:
A single-element list containing the Shape of the MaxPool output.
Raises:
ValueError: If the shape of the input is invalid or incompatible with
the values of the attrs.
"""
input_shape = op.inputs[0].get_shape().with_rank(4)
try:
data_format = op.get_attr("data_format")
except ValueError:
data_format = None
if data_format == b"NCHW":
# Convert input shape to the default NHWC for inference.
input_shape = [input_shape[0], input_shape[2], input_shape[3],
input_shape[1]]
if data_format == b"NCHW":
ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr("ksize")
stride_b, stride_d, stride_r, stride_c = op.get_attr("strides")
else:
ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr("ksize")
stride_b, stride_r, stride_c, stride_d = op.get_attr("strides")
batch_size = input_shape[0]
in_rows = input_shape[1]
in_cols = input_shape[2]
depth = input_shape[3]
if ksize_b != 1:
raise ValueError("Current implementation does not support pooling "
"in the batch dimension.")
if stride_b != 1:
raise ValueError("Current implementation does not support strides "
"in the batch dimension.")
if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1):
raise ValueError("MaxPooling supports exactly one of pooling across depth "
"or pooling across width/height.")
# TODO(mrry,shlens): Raise an error if the stride would cause
# information in the input to be ignored. This will require a change
# in the kernel implementation.
if ksize_d == 1:
padding = op.get_attr("padding")
out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,
ksize_c, stride_r, stride_c,
padding)
output_shape = [batch_size, out_rows, out_cols, depth]
else:
if depth % ksize_d > 0:
raise ValueError("Depthwise max pooling requires the depth window "
"to evenly divide the input depth.")
if stride_d != ksize_d:
raise ValueError("Depthwise max pooling requires the depth window "
"to equal the depth stride.")
output_shape = [batch_size, in_rows, in_cols, depth // ksize_d]
if data_format == b"NCHW":
# Convert output shape back to NCHW.
output_shape = [output_shape[0], output_shape[3], output_shape[1],
output_shape[2]]
return [tensor_shape.TensorShape(output_shape)]
def no_outputs(unused_op):
"""Shape function for use with ops that have no outputs."""
return []
def unknown_shape(op):
"""Shape function for use with ops whose output shapes are unknown."""
return [tensor_shape.unknown_shape() for _ in op.outputs]
def broadcast_shape(shape_x, shape_y):
"""Returns the broadcasted shape between `shape_x` and `shape_y`.
Args:
shape_x: A `TensorShape`
shape_y: A `TensorShape`
Returns:
A `TensorShape` representing the broadcasted shape.
Raises:
ValueError: If the two shapes can not be broadcasted.
"""
if shape_x.ndims is None or shape_y.ndims is None:
return tensor_shape.unknown_shape()
# To compute the broadcasted dimensions, we zip together shape_x and shape_y,
# and pad with 1 to make them the same length.
broadcasted_dims = reversed(list(six.moves.zip_longest(
reversed(shape_x.dims),
reversed(shape_y.dims),
fillvalue=tensor_shape.Dimension(1))))
# Next we combine the dimensions according to the numpy broadcasting rules.
# http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
return_dims = []
for (dim_x, dim_y) in broadcasted_dims:
if dim_x.value is None or dim_y.value is None:
      # One or both dimensions are unknown. If either dimension is greater than
# 1, we assume that the program is correct, and the other dimension will
# be broadcast to match it.
# TODO(mrry): If we eliminate the shape checks in C++, we must still
# assert that the unknown dim is either 1 or the same as the known dim.
if dim_x.value is not None and dim_x.value > 1:
return_dims.append(dim_x)
elif dim_y.value is not None and dim_y.value > 1:
return_dims.append(dim_y)
else:
return_dims.append(None)
elif dim_x.value == 1:
# We will broadcast dim_x to dim_y.
return_dims.append(dim_y)
elif dim_y.value == 1:
# We will broadcast dim_y to dim_x.
return_dims.append(dim_x)
elif dim_x.value == dim_y.value:
# The dimensions are compatible, so output is the same size in that
# dimension.
return_dims.append(dim_x.merge_with(dim_y))
else:
raise ValueError("Incompatible shapes for broadcasting: %s and %s"
% (shape_x, shape_y))
return tensor_shape.TensorShape(return_dims)
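# Hedged example of the numpy-style broadcasting implemented above (the
# shapes are illustrative):
#
#   >>> broadcast_shape(tensor_shape.TensorShape([5, 1, 3]),
#   ...                 tensor_shape.TensorShape([4, 3]))
#   TensorShape([Dimension(5), Dimension(4), Dimension(3)])
#
# The shorter shape is right-aligned and padded with 1s, then each pair of
# dimensions is combined: a 1 broadcasts to the other value, equal values
# pass through, and anything else raises ValueError.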
def call_cpp_shape_fn(op,
input_tensors_needed=None,
input_tensors_as_shapes_needed=None,
debug_python_shape_fn=None,
require_shape_fn=True):
"""A shape function that delegates to the registered C++ shape function.
Args:
op: the node in the graph for which to compute output shapes.
input_tensors_needed: a list of input tensor indices for which to compute
the input tensor's value and pass to the C++ shape function.
input_tensors_as_shapes_needed: a list of input tensor indices for which to
compute the constant_value_as_shape and pass to the C++ shape function.
debug_python_shape_fn: For testing only during migration to using
call_cpp_shape_fn. Do not submit calls that set this,
as the comparison is slow. If non-None, the python shape function;
this function will be called and its output compared to that of
the C++ shape function.
require_shape_fn: If true, and the C++ shape function is not registered
in the current binary then an exception is raised; otherwise, if the
C++ shape function is not registered then unknown_shape is used.
Returns:
A dictionary with the following keys:
shapes: A TensorShape list of the output shapes of the op, as computed
using the C++ shape inference function registered for the op.
handle_shapes: A TensorShape list of the shapes for handle outputs, if
any.
handle_dtypes: A list of DataType enums for the handle outputs, if any.
Raises:
ValueError: If the C++ shape function returned an error (e.g. because the
shapes of the inputs are of the wrong rank or otherwise incompatible
according to the shape function).
RuntimeError: If the C++ shape function is not registered and
<require_shape_fn> is True.
"""
if op.type == "Const":
# To avoid serializing large constants, we special-case constant
# here, even though it has a C++ shape function. When Python
# calls the C / C-API directly, we should be able to remove this.
return {
"shapes": [tensor_shape.TensorShape(op.get_attr("value").tensor_shape)],
"handle_shapes": [tensor_shape.TensorShape(None).as_proto()],
"handle_dtypes": [types_pb2.DT_INVALID]
}
input_tensors_needed = input_tensors_needed or []
input_tensors_as_shapes_needed = input_tensors_as_shapes_needed or []
while True:
res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
input_tensors_as_shapes_needed,
debug_python_shape_fn, require_shape_fn)
if not isinstance(res, dict):
# Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
return res
# See if we need to evaluate some inputs.
if not res["inputs_needed"]:
return res
p = cpp_shape_inference_pb2.CppShapeInferenceInputsNeeded()
p = p.FromString(res["inputs_needed"])
changed = False
for idx in p.input_tensors_needed:
if idx not in input_tensors_needed:
input_tensors_needed.append(idx)
changed = True
for idx in p.input_tensors_as_shapes_needed:
if idx not in input_tensors_as_shapes_needed:
input_tensors_as_shapes_needed.append(idx)
changed = True
if not changed:
return res
def _call_cpp_shape_fn_impl(
op, input_tensors_needed,
input_tensors_as_shapes_needed,
debug_python_shape_fn, require_shape_fn):
"""Core implementaton of call_cpp_shape_fn."""
graph_def_version = op.graph.graph_def_versions.producer
node_def_str = op.node_def.SerializeToString()
def tensor_to_inference_result(t):
r = cpp_shape_inference_pb2.CppShapeInferenceResult()
r.shape.CopyFrom(t.get_shape().as_proto())
# pylint: disable=protected-access
r.handle_shape.CopyFrom(t._handle_shape)
r.handle_dtype = t._handle_dtype
# pylint: enable=protected-access
return r.SerializeToString()
input_shapes = [tensor_to_inference_result(i) for i in op.inputs]
input_tensors = [None for i in input_shapes]
for idx in input_tensors_needed:
v = tensor_util.constant_value(op.inputs[idx])
if v is not None:
input_tensors[idx] = np.asarray(v)
serialized_unknown_shape = (
tensor_shape.TensorShape(None).as_proto().SerializeToString())
arr = [serialized_unknown_shape for i in input_shapes]
for idx in input_tensors_as_shapes_needed:
s = tensor_util.constant_value_as_shape(op.inputs[idx])
if s is not None:
arr[idx] = s.as_proto().SerializeToString()
input_tensors_as_shapes = arr
missing_shape_fn = False
try:
with errors.raise_exception_on_not_ok_status() as status:
output = pywrap_tensorflow.RunCppShapeInference(
graph_def_version, node_def_str, input_shapes, input_tensors,
input_tensors_as_shapes, status)
except errors.InvalidArgumentError as err:
if err.message.startswith("No shape inference function exists for op"):
missing_shape_fn = True
else:
raise ValueError(err.message)
if missing_shape_fn:
if require_shape_fn:
raise RuntimeError(
"No C++ shape function registered for standard op: %s" % op.type)
return unknown_shape(op)
output_shapes = output[:-1]
# Convert TensorShapeProto values in output_shapes.
result_protos = [
cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)
for s in output_shapes
]
result = [r.shape for r in result_protos]
result_handle_shapes = [r.handle_shape for r in result_protos]
result_handle_dtypes = [r.handle_dtype for r in result_protos]
if debug_python_shape_fn:
try:
python_result = [tensor_shape.as_shape(s)
for s in debug_python_shape_fn(op)]
except Exception as err:
raise AssertionError("Python shape function return error but "
"C++ shape functon did not: %s" % str(err))
result_as_shapes = [tensor_shape.as_shape(s) for s in result]
if str(result_as_shapes) != str(python_result):
raise ValueError(
("Python vs CPP shape mismatch. "
"CPP: %s vs python: %s on node %s "
"with input shapes %s") % (
str(result_as_shapes), str(python_result), str(op.node_def),
",".join([str(i.get_shape()) for i in op.inputs])))
return {"shapes": result,
"handle_shapes": result_handle_shapes,
"handle_dtypes": result_handle_dtypes,
"inputs_needed": output[-1]}
# pylint: disable=protected-access
ops._set_call_cpp_shape_fn(call_cpp_shape_fn)
# pylint: enable=protected-access
| apache-2.0 |
Designist/sympy | sympy/functions/elementary/tests/test_complexes.py | 15 | 25268 | from sympy import (
Abs, adjoint, arg, atan2, conjugate, cos, DiracDelta, E, exp, expand,
Expr, Function, Heaviside, I, im, log, nan, oo, pi, Rational, re, S,
sign, sin, sqrt, Symbol, symbols, transpose, zoo, exp_polar, Piecewise,
Interval, comp, Integral)
from sympy.utilities.pytest import XFAIL, raises
def N_equals(a, b):
"""Check whether two complex numbers are numerically close"""
return comp(a.n(), b.n(), 1.e-6)
def test_re():
x, y = symbols('x,y')
a, b = symbols('a,b', real=True)
r = Symbol('r', real=True)
i = Symbol('i', imaginary=True)
assert re(nan) == nan
assert re(oo) == oo
assert re(-oo) == -oo
assert re(0) == 0
assert re(1) == 1
assert re(-1) == -1
assert re(E) == E
assert re(-E) == -E
assert re(x) == re(x)
assert re(x*I) == -im(x)
assert re(r*I) == 0
assert re(r) == r
assert re(i*I) == I * i
assert re(i) == 0
assert re(x + y) == re(x + y)
assert re(x + r) == re(x) + r
assert re(re(x)) == re(x)
assert re(2 + I) == 2
assert re(x + I) == re(x)
assert re(x + y*I) == re(x) - im(y)
assert re(x + r*I) == re(x)
assert re(log(2*I)) == log(2)
assert re((2 + I)**2).expand(complex=True) == 3
assert re(conjugate(x)) == re(x)
assert conjugate(re(x)) == re(x)
assert re(x).as_real_imag() == (re(x), 0)
assert re(i*r*x).diff(r) == re(i*x)
assert re(i*r*x).diff(i) == I*r*im(x)
assert re(
sqrt(a + b*I)) == (a**2 + b**2)**Rational(1, 4)*cos(atan2(b, a)/2)
assert re(a * (2 + b*I)) == 2*a
assert re((1 + sqrt(a + b*I))/2) == \
(a**2 + b**2)**Rational(1, 4)*cos(atan2(b, a)/2)/2 + Rational(1, 2)
assert re(x).rewrite(im) == x - im(x)
assert (x + re(y)).rewrite(re, im) == x + y - im(y)
a = Symbol('a', algebraic=True)
t = Symbol('t', transcendental=True)
x = Symbol('x')
assert re(a).is_algebraic
assert re(x).is_algebraic is None
assert re(t).is_algebraic is False
def test_im():
x, y = symbols('x,y')
a, b = symbols('a,b', real=True)
r = Symbol('r', real=True)
i = Symbol('i', imaginary=True)
assert im(nan) == nan
assert im(oo*I) == oo
assert im(-oo*I) == -oo
assert im(0) == 0
assert im(1) == 0
assert im(-1) == 0
assert im(E*I) == E
assert im(-E*I) == -E
assert im(x) == im(x)
assert im(x*I) == re(x)
assert im(r*I) == r
assert im(r) == 0
assert im(i*I) == 0
assert im(i) == -I * i
assert im(x + y) == im(x + y)
assert im(x + r) == im(x)
assert im(x + r*I) == im(x) + r
assert im(im(x)*I) == im(x)
assert im(2 + I) == 1
assert im(x + I) == im(x) + 1
assert im(x + y*I) == im(x) + re(y)
assert im(x + r*I) == im(x) + r
assert im(log(2*I)) == pi/2
assert im((2 + I)**2).expand(complex=True) == 4
assert im(conjugate(x)) == -im(x)
assert conjugate(im(x)) == im(x)
assert im(x).as_real_imag() == (im(x), 0)
assert im(i*r*x).diff(r) == im(i*x)
assert im(i*r*x).diff(i) == -I * re(r*x)
assert im(
sqrt(a + b*I)) == (a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)
assert im(a * (2 + b*I)) == a*b
assert im((1 + sqrt(a + b*I))/2) == \
(a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)/2
assert im(x).rewrite(re) == x - re(x)
assert (x + im(y)).rewrite(im, re) == x + y - re(y)
a = Symbol('a', algebraic=True)
t = Symbol('t', transcendental=True)
x = Symbol('x')
assert re(a).is_algebraic
assert re(x).is_algebraic is None
assert re(t).is_algebraic is False
def test_sign():
assert sign(1.2) == 1
assert sign(-1.2) == -1
assert sign(3*I) == I
assert sign(-3*I) == -I
assert sign(0) == 0
assert sign(nan) == nan
assert sign(2 + 2*I).doit() == sqrt(2)*(2 + 2*I)/4
assert sign(2 + 3*I).simplify() == sign(2 + 3*I)
assert sign(2 + 2*I).simplify() == sign(1 + I)
assert sign(im(sqrt(1 - sqrt(3)))) == 1
assert sign(sqrt(1 - sqrt(3))) == I
x = Symbol('x')
assert sign(x).is_finite is True
assert sign(x).is_complex is True
assert sign(x).is_imaginary is None
assert sign(x).is_integer is None
assert sign(x).is_real is None
assert sign(x).is_zero is None
assert sign(x).doit() == sign(x)
assert sign(1.2*x) == sign(x)
assert sign(2*x) == sign(x)
assert sign(I*x) == I*sign(x)
assert sign(-2*I*x) == -I*sign(x)
assert sign(conjugate(x)) == conjugate(sign(x))
p = Symbol('p', positive=True)
n = Symbol('n', negative=True)
m = Symbol('m', negative=True)
assert sign(2*p*x) == sign(x)
assert sign(n*x) == -sign(x)
assert sign(n*m*x) == sign(x)
x = Symbol('x', imaginary=True)
assert sign(x).is_imaginary is True
assert sign(x).is_integer is False
assert sign(x).is_real is False
assert sign(x).is_zero is False
assert sign(x).diff(x) == 2*DiracDelta(-I*x)
assert sign(x).doit() == x / Abs(x)
assert conjugate(sign(x)) == -sign(x)
x = Symbol('x', real=True)
assert sign(x).is_imaginary is False
assert sign(x).is_integer is True
assert sign(x).is_real is True
assert sign(x).is_zero is None
assert sign(x).diff(x) == 2*DiracDelta(x)
assert sign(x).doit() == sign(x)
assert conjugate(sign(x)) == sign(x)
x = Symbol('x', nonzero=True)
assert sign(x).is_imaginary is False
assert sign(x).is_integer is True
assert sign(x).is_real is True
assert sign(x).is_zero is False
assert sign(x).doit() == x / Abs(x)
assert sign(Abs(x)) == 1
assert Abs(sign(x)) == 1
x = Symbol('x', positive=True)
assert sign(x).is_imaginary is False
assert sign(x).is_integer is True
assert sign(x).is_real is True
assert sign(x).is_zero is False
assert sign(x).doit() == x / Abs(x)
assert sign(Abs(x)) == 1
assert Abs(sign(x)) == 1
x = 0
assert sign(x).is_imaginary is False
assert sign(x).is_integer is True
assert sign(x).is_real is True
assert sign(x).is_zero is True
assert sign(x).doit() == 0
assert sign(Abs(x)) == 0
assert Abs(sign(x)) == 0
nz = Symbol('nz', nonzero=True, integer=True)
assert sign(nz).is_imaginary is False
assert sign(nz).is_integer is True
assert sign(nz).is_real is True
assert sign(nz).is_zero is False
assert sign(nz)**2 == 1
assert (sign(nz)**3).args == (sign(nz), 3)
assert sign(Symbol('x', nonnegative=True)).is_nonnegative
assert sign(Symbol('x', nonnegative=True)).is_nonpositive is None
assert sign(Symbol('x', nonpositive=True)).is_nonnegative is None
assert sign(Symbol('x', nonpositive=True)).is_nonpositive
assert sign(Symbol('x', real=True)).is_nonnegative is None
assert sign(Symbol('x', real=True)).is_nonpositive is None
assert sign(Symbol('x', real=True, zero=False)).is_nonpositive is None
x, y = Symbol('x', real=True), Symbol('y')
assert sign(x).rewrite(Piecewise) == \
Piecewise((1, x > 0), (-1, x < 0), (0, True))
assert sign(y).rewrite(Piecewise) == sign(y)
assert sign(x).rewrite(Heaviside) == 2*Heaviside(x)-1
assert sign(y).rewrite(Heaviside) == sign(y)
# evaluate what can be evaluated
assert sign(exp_polar(I*pi)*pi) is S.NegativeOne
eq = -sqrt(10 + 6*sqrt(3)) + sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3))
    # if there is a fast way to know when you can and when you cannot prove
    # an expression like this is zero then the equality to zero is ok
assert sign(eq).func is sign or sign(eq) == 0
# but sometimes it's hard to do this so it's better not to load
# abs down with tests that will be very slow
q = 1 + sqrt(2) - 2*sqrt(3) + 1331*sqrt(6)
p = expand(q**3)**Rational(1, 3)
d = p - q
assert sign(d).func is sign or sign(d) == 0
def test_as_real_imag():
n = pi**1000
# the special code for working out the real
# and complex parts of a power with Integer exponent
# should not run if there is no imaginary part, hence
# this should not hang
assert n.as_real_imag() == (n, 0)
# issue 6261
x = Symbol('x')
assert sqrt(x).as_real_imag() == \
((re(x)**2 + im(x)**2)**(S(1)/4)*cos(atan2(im(x), re(x))/2),
(re(x)**2 + im(x)**2)**(S(1)/4)*sin(atan2(im(x), re(x))/2))
# issue 3853
a, b = symbols('a,b', real=True)
assert ((1 + sqrt(a + b*I))/2).as_real_imag() == \
(
(a**2 + b**2)**Rational(
1, 4)*cos(atan2(b, a)/2)/2 + Rational(1, 2),
(a**2 + b**2)**Rational(1, 4)*sin(atan2(b, a)/2)/2)
assert sqrt(a**2).as_real_imag() == (sqrt(a**2), 0)
i = symbols('i', imaginary=True)
assert sqrt(i**2).as_real_imag() == (0, abs(i))
@XFAIL
def test_sign_issue_3068():
n = pi**1000
    i = int(n)
    x = Symbol('x')
assert (n - i).round() == 1 # doesn't hang
assert sign(n - i) == 1
# perhaps it's not possible to get the sign right when
    # only 1 digit is being requested for this situation;
# 2 digits works
assert (n - x).n(1, subs={x: i}) > 0
assert (n - x).n(2, subs={x: i}) > 0
def test_Abs():
raises(TypeError, lambda: Abs(Interval(2, 3))) # issue 8717
x, y = symbols('x,y')
assert sign(sign(x)) == sign(x)
assert sign(x*y).func is sign
assert Abs(0) == 0
assert Abs(1) == 1
assert Abs(-1) == 1
assert Abs(I) == 1
assert Abs(-I) == 1
assert Abs(nan) == nan
assert Abs(I * pi) == pi
assert Abs(-I * pi) == pi
assert Abs(I * x) == Abs(x)
assert Abs(-I * x) == Abs(x)
assert Abs(-2*x) == 2*Abs(x)
assert Abs(-2.0*x) == 2.0*Abs(x)
assert Abs(2*pi*x*y) == 2*pi*Abs(x*y)
assert Abs(conjugate(x)) == Abs(x)
assert conjugate(Abs(x)) == Abs(x)
a = Symbol('a', positive=True)
assert Abs(2*pi*x*a) == 2*pi*a*Abs(x)
assert Abs(2*pi*I*x*a) == 2*pi*a*Abs(x)
x = Symbol('x', real=True)
n = Symbol('n', integer=True)
assert Abs((-1)**n) == 1
assert x**(2*n) == Abs(x)**(2*n)
assert Abs(x).diff(x) == sign(x)
assert abs(x) == Abs(x) # Python built-in
assert Abs(x)**3 == x**2*Abs(x)
assert Abs(x)**4 == x**4
assert (
Abs(x)**(3*n)).args == (Abs(x), 3*n) # leave symbolic odd unchanged
assert (1/Abs(x)).args == (Abs(x), -1)
assert 1/Abs(x)**3 == 1/(x**2*Abs(x))
assert Abs(x)**-3 == Abs(x)/(x**4)
assert Abs(x**3) == x**2*Abs(x)
x = Symbol('x', imaginary=True)
assert Abs(x).diff(x) == -sign(x)
eq = -sqrt(10 + 6*sqrt(3)) + sqrt(1 + sqrt(3)) + sqrt(3 + 3*sqrt(3))
# if there is a fast way to know when you can and when you cannot prove an
# expression like this is zero then the equality to zero is ok
assert abs(eq).func is Abs or abs(eq) == 0
# but sometimes it's hard to do this so it's better not to load
# abs down with tests that will be very slow
q = 1 + sqrt(2) - 2*sqrt(3) + 1331*sqrt(6)
p = expand(q**3)**Rational(1, 3)
d = p - q
assert abs(d).func is Abs or abs(d) == 0
assert Abs(4*exp(pi*I/4)) == 4
assert Abs(3**(2 + I)) == 9
assert Abs((-3)**(1 - I)) == 3*exp(pi)
assert Abs(oo) is oo
assert Abs(-oo) is oo
assert Abs(oo + I) is oo
assert Abs(oo + I*oo) is oo
a = Symbol('a', algebraic=True)
t = Symbol('t', transcendental=True)
x = Symbol('x')
assert re(a).is_algebraic
assert re(x).is_algebraic is None
assert re(t).is_algebraic is False
def test_Abs_rewrite():
x = Symbol('x', real=True)
a = Abs(x).rewrite(Heaviside).expand()
assert a == x*Heaviside(x) - x*Heaviside(-x)
for i in [-2, -1, 0, 1, 2]:
assert a.subs(x, i) == abs(i)
y = Symbol('y')
assert Abs(y).rewrite(Heaviside) == Abs(y)
x, y = Symbol('x', real=True), Symbol('y')
assert Abs(x).rewrite(Piecewise) == Piecewise((x, x >= 0), (-x, True))
assert Abs(y).rewrite(Piecewise) == Abs(y)
assert Abs(y).rewrite(sign) == y/sign(y)
def test_Abs_real():
# test some properties of abs that only apply
# to real numbers
x = Symbol('x', complex=True)
assert sqrt(x**2) != Abs(x)
assert Abs(x**2) != x**2
x = Symbol('x', real=True)
assert sqrt(x**2) == Abs(x)
assert Abs(x**2) == x**2
# if the symbol is zero, the following will still apply
nn = Symbol('nn', nonnegative=True, real=True)
np = Symbol('np', nonpositive=True, real=True)
assert Abs(nn) == nn
assert Abs(np) == -np
def test_Abs_properties():
x = Symbol('x')
assert Abs(x).is_real is True
assert Abs(x).is_rational is None
assert Abs(x).is_positive is None
assert Abs(x).is_nonnegative is True
z = Symbol('z', complex=True, zero=False)
assert Abs(z).is_real is True
assert Abs(z).is_rational is None
assert Abs(z).is_positive is True
assert Abs(z).is_zero is False
p = Symbol('p', positive=True)
assert Abs(p).is_real is True
assert Abs(p).is_rational is None
assert Abs(p).is_positive is True
assert Abs(p).is_zero is False
q = Symbol('q', rational=True)
assert Abs(q).is_rational is True
assert Abs(q).is_integer is None
assert Abs(q).is_positive is None
assert Abs(q).is_nonnegative is True
i = Symbol('i', integer=True)
assert Abs(i).is_integer is True
assert Abs(i).is_positive is None
assert Abs(i).is_nonnegative is True
e = Symbol('n', even=True)
ne = Symbol('ne', real=True, even=False)
assert Abs(e).is_even
assert Abs(ne).is_even is False
assert Abs(i).is_even is None
o = Symbol('n', odd=True)
no = Symbol('no', real=True, odd=False)
assert Abs(o).is_odd
assert Abs(no).is_odd is False
assert Abs(i).is_odd is None
def test_abs():
# this tests that abs calls Abs; don't rename to
# test_Abs since that test is already above
a = Symbol('a', positive=True)
assert abs(I*(1 + a)**2) == (1 + a)**2
def test_arg():
assert arg(0) == nan
assert arg(1) == 0
assert arg(-1) == pi
assert arg(I) == pi/2
assert arg(-I) == -pi/2
assert arg(1 + I) == pi/4
assert arg(-1 + I) == 3*pi/4
assert arg(1 - I) == -pi/4
f = Function('f')
assert not arg(f(0) + I*f(1)).atoms(re)
p = Symbol('p', positive=True)
assert arg(p) == 0
n = Symbol('n', negative=True)
assert arg(n) == pi
x = Symbol('x')
assert conjugate(arg(x)) == arg(x)
e = p + I*p**2
assert arg(e) == arg(1 + p*I)
# make sure sign doesn't swap
e = -2*p + 4*I*p**2
assert arg(e) == arg(-1 + 2*p*I)
# make sure sign isn't lost
x = symbols('x', real=True) # could be zero
e = x + I*x
assert arg(e) == arg(x*(1 + I))
assert arg(e/p) == arg(x*(1 + I))
e = p*cos(p) + I*log(p)*exp(p)
assert arg(e).args[0] == e
# keep it simple -- let the user do more advanced cancellation
e = (p + 1) + I*(p**2 - 1)
assert arg(e).args[0] == e
def test_arg_rewrite():
assert arg(1 + I) == atan2(1, 1)
x = Symbol('x', real=True)
y = Symbol('y', real=True)
assert arg(x + I*y).rewrite(atan2) == atan2(y, x)
def test_adjoint():
a = Symbol('a', antihermitian=True)
b = Symbol('b', hermitian=True)
assert adjoint(a) == -a
assert adjoint(I*a) == I*a
assert adjoint(b) == b
assert adjoint(I*b) == -I*b
assert adjoint(a*b) == -b*a
assert adjoint(I*a*b) == I*b*a
x, y = symbols('x y')
assert adjoint(adjoint(x)) == x
assert adjoint(x + y) == adjoint(x) + adjoint(y)
assert adjoint(x - y) == adjoint(x) - adjoint(y)
assert adjoint(x * y) == adjoint(x) * adjoint(y)
assert adjoint(x / y) == adjoint(x) / adjoint(y)
assert adjoint(-x) == -adjoint(x)
x, y = symbols('x y', commutative=False)
assert adjoint(adjoint(x)) == x
assert adjoint(x + y) == adjoint(x) + adjoint(y)
assert adjoint(x - y) == adjoint(x) - adjoint(y)
assert adjoint(x * y) == adjoint(y) * adjoint(x)
assert adjoint(x / y) == 1 / adjoint(y) * adjoint(x)
assert adjoint(-x) == -adjoint(x)
def test_conjugate():
a = Symbol('a', real=True)
b = Symbol('b', imaginary=True)
assert conjugate(a) == a
assert conjugate(I*a) == -I*a
assert conjugate(b) == -b
assert conjugate(I*b) == I*b
assert conjugate(a*b) == -a*b
assert conjugate(I*a*b) == I*a*b
x, y = symbols('x y')
assert conjugate(conjugate(x)) == x
assert conjugate(x + y) == conjugate(x) + conjugate(y)
assert conjugate(x - y) == conjugate(x) - conjugate(y)
assert conjugate(x * y) == conjugate(x) * conjugate(y)
assert conjugate(x / y) == conjugate(x) / conjugate(y)
assert conjugate(-x) == -conjugate(x)
a = Symbol('a', algebraic=True)
t = Symbol('t', transcendental=True)
assert re(a).is_algebraic
assert re(x).is_algebraic is None
assert re(t).is_algebraic is False
def test_conjugate_transpose():
x = Symbol('x')
assert conjugate(transpose(x)) == adjoint(x)
assert transpose(conjugate(x)) == adjoint(x)
assert adjoint(transpose(x)) == conjugate(x)
assert transpose(adjoint(x)) == conjugate(x)
assert adjoint(conjugate(x)) == transpose(x)
assert conjugate(adjoint(x)) == transpose(x)
class Symmetric(Expr):
def _eval_adjoint(self):
return None
def _eval_conjugate(self):
return None
def _eval_transpose(self):
return self
x = Symmetric()
assert conjugate(x) == adjoint(x)
assert transpose(x) == x
def test_transpose():
a = Symbol('a', complex=True)
assert transpose(a) == a
assert transpose(I*a) == I*a
x, y = symbols('x y')
assert transpose(transpose(x)) == x
assert transpose(x + y) == transpose(x) + transpose(y)
assert transpose(x - y) == transpose(x) - transpose(y)
assert transpose(x * y) == transpose(x) * transpose(y)
assert transpose(x / y) == transpose(x) / transpose(y)
assert transpose(-x) == -transpose(x)
x, y = symbols('x y', commutative=False)
assert transpose(transpose(x)) == x
assert transpose(x + y) == transpose(x) + transpose(y)
assert transpose(x - y) == transpose(x) - transpose(y)
assert transpose(x * y) == transpose(y) * transpose(x)
assert transpose(x / y) == 1 / transpose(y) * transpose(x)
assert transpose(-x) == -transpose(x)
def test_polarify():
from sympy import polar_lift, polarify
x = Symbol('x')
z = Symbol('z', polar=True)
f = Function('f')
ES = {}
assert polarify(-1) == (polar_lift(-1), ES)
assert polarify(1 + I) == (polar_lift(1 + I), ES)
assert polarify(exp(x), subs=False) == exp(x)
assert polarify(1 + x, subs=False) == 1 + x
assert polarify(f(I) + x, subs=False) == f(polar_lift(I)) + x
assert polarify(x, lift=True) == polar_lift(x)
assert polarify(z, lift=True) == z
assert polarify(f(x), lift=True) == f(polar_lift(x))
assert polarify(1 + x, lift=True) == polar_lift(1 + x)
assert polarify(1 + f(x), lift=True) == polar_lift(1 + f(polar_lift(x)))
newex, subs = polarify(f(x) + z)
assert newex.subs(subs) == f(x) + z
mu = Symbol("mu")
sigma = Symbol("sigma", positive=True)
# Make sure polarify(lift=True) doesn't try to lift the integration
# variable
assert polarify(
Integral(sqrt(2)*x*exp(-(-mu + x)**2/(2*sigma**2))/(2*sqrt(pi)*sigma),
(x, -oo, oo)), lift=True) == Integral(sqrt(2)*(sigma*exp_polar(0))**exp_polar(I*pi)*
exp((sigma*exp_polar(0))**(2*exp_polar(I*pi))*exp_polar(I*pi)*polar_lift(-mu + x)**
(2*exp_polar(0))/2)*exp_polar(0)*polar_lift(x)/(2*sqrt(pi)), (x, -oo, oo))
def test_unpolarify():
from sympy import (exp_polar, polar_lift, exp, unpolarify,
principal_branch)
from sympy import gamma, erf, sin, tanh, uppergamma, Eq, Ne
from sympy.abc import x
p = exp_polar(7*I) + 1
u = exp(7*I) + 1
assert unpolarify(1) == 1
assert unpolarify(p) == u
assert unpolarify(p**2) == u**2
assert unpolarify(p**x) == p**x
assert unpolarify(p*x) == u*x
assert unpolarify(p + x) == u + x
assert unpolarify(sqrt(sin(p))) == sqrt(sin(u))
# Test reduction to principal branch 2*pi.
t = principal_branch(x, 2*pi)
assert unpolarify(t) == x
assert unpolarify(sqrt(t)) == sqrt(t)
# Test exponents_only.
assert unpolarify(p**p, exponents_only=True) == p**u
assert unpolarify(uppergamma(x, p**p)) == uppergamma(x, p**u)
# Test functions.
assert unpolarify(sin(p)) == sin(u)
assert unpolarify(tanh(p)) == tanh(u)
assert unpolarify(gamma(p)) == gamma(u)
assert unpolarify(erf(p)) == erf(u)
assert unpolarify(uppergamma(x, p)) == uppergamma(x, p)
assert unpolarify(uppergamma(sin(p), sin(p + exp_polar(0)))) == \
uppergamma(sin(u), sin(u + 1))
assert unpolarify(uppergamma(polar_lift(0), 2*exp_polar(0))) == \
uppergamma(0, 2)
assert unpolarify(Eq(p, 0)) == Eq(u, 0)
assert unpolarify(Ne(p, 0)) == Ne(u, 0)
assert unpolarify(polar_lift(x) > 0) == (x > 0)
# Test bools
assert unpolarify(True) is True
def test_issue_4035():
x = Symbol('x')
assert Abs(x).expand(trig=True) == Abs(x)
assert sign(x).expand(trig=True) == sign(x)
assert arg(x).expand(trig=True) == arg(x)
def test_issue_3206():
x = Symbol('x')
assert Abs(Abs(x)) == Abs(x)
def test_issue_4754_derivative_conjugate():
x = Symbol('x', real=True)
y = Symbol('y', imaginary=True)
f = Function('f')
assert (f(x).conjugate()).diff(x) == (f(x).diff(x)).conjugate()
assert (f(y).conjugate()).diff(y) == -(f(y).diff(y)).conjugate()
def test_derivatives_issue_4757():
x = Symbol('x', real=True)
y = Symbol('y', imaginary=True)
f = Function('f')
assert re(f(x)).diff(x) == re(f(x).diff(x))
assert im(f(x)).diff(x) == im(f(x).diff(x))
assert re(f(y)).diff(y) == -I*im(f(y).diff(y))
assert im(f(y)).diff(y) == -I*re(f(y).diff(y))
assert Abs(f(x)).diff(x).subs(f(x), 1 + I*x).doit() == x/sqrt(1 + x**2)
assert arg(f(x)).diff(x).subs(f(x), 1 + I*x**2).doit() == 2*x/(1 + x**4)
assert Abs(f(y)).diff(y).subs(f(y), 1 + y).doit() == -y/sqrt(1 - y**2)
assert arg(f(y)).diff(y).subs(f(y), I + y**2).doit() == 2*y/(1 + y**4)
def test_periodic_argument():
from sympy import (periodic_argument, unbranched_argument, oo,
principal_branch, polar_lift, pi)
x = Symbol('x')
p = Symbol('p', positive=True)
assert unbranched_argument(2 + I) == periodic_argument(2 + I, oo)
assert unbranched_argument(1 + x) == periodic_argument(1 + x, oo)
assert N_equals(unbranched_argument((1 + I)**2), pi/2)
assert N_equals(unbranched_argument((1 - I)**2), -pi/2)
assert N_equals(periodic_argument((1 + I)**2, 3*pi), pi/2)
assert N_equals(periodic_argument((1 - I)**2, 3*pi), -pi/2)
assert unbranched_argument(principal_branch(x, pi)) == \
periodic_argument(x, pi)
assert unbranched_argument(polar_lift(2 + I)) == unbranched_argument(2 + I)
assert periodic_argument(polar_lift(2 + I), 2*pi) == \
periodic_argument(2 + I, 2*pi)
assert periodic_argument(polar_lift(2 + I), 3*pi) == \
periodic_argument(2 + I, 3*pi)
assert periodic_argument(polar_lift(2 + I), pi) == \
periodic_argument(polar_lift(2 + I), pi)
assert unbranched_argument(polar_lift(1 + I)) == pi/4
assert periodic_argument(2*p, p) == periodic_argument(p, p)
assert periodic_argument(pi*p, p) == periodic_argument(p, p)
assert Abs(polar_lift(1 + I)) == Abs(1 + I)
@XFAIL
def test_principal_branch_fail():
# TODO XXX why does abs(x)._eval_evalf() not fall back to global evalf?
assert N_equals(principal_branch((1 + I)**2, pi/2), 0)
def test_principal_branch():
from sympy import principal_branch, polar_lift, exp_polar
p = Symbol('p', positive=True)
x = Symbol('x')
neg = Symbol('x', negative=True)
assert principal_branch(polar_lift(x), p) == principal_branch(x, p)
assert principal_branch(polar_lift(2 + I), p) == principal_branch(2 + I, p)
assert principal_branch(2*x, p) == 2*principal_branch(x, p)
assert principal_branch(1, pi) == exp_polar(0)
assert principal_branch(-1, 2*pi) == exp_polar(I*pi)
assert principal_branch(-1, pi) == exp_polar(0)
assert principal_branch(exp_polar(3*pi*I)*x, 2*pi) == \
principal_branch(exp_polar(I*pi)*x, 2*pi)
assert principal_branch(neg*exp_polar(pi*I), 2*pi) == neg*exp_polar(-I*pi)
assert N_equals(principal_branch((1 + I)**2, 2*pi), 2*I)
assert N_equals(principal_branch((1 + I)**2, 3*pi), 2*I)
assert N_equals(principal_branch((1 + I)**2, 1*pi), 2*I)
# test argument sanitization
assert principal_branch(x, I).func is principal_branch
assert principal_branch(x, -4).func is principal_branch
assert principal_branch(x, -oo).func is principal_branch
assert principal_branch(x, zoo).func is principal_branch
@XFAIL
def test_issue_6167_6151():
    from sympy import simplify
    x = Symbol('x')
    n = pi**1000
    i = int(n)
assert sign(n - i) == 1
assert abs(n - i) == n - i
eps = pi**-1500
big = pi**1000
one = cos(x)**2 + sin(x)**2
e = big*one - big + eps
assert sign(simplify(e)) == 1
for xi in (111, 11, 1, S(1)/10):
assert sign(e.subs(x, xi)) == 1
| bsd-3-clause |
obruns/gtest | test/gtest_output_test.py | 188 | 12260 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = '[email protected] (Zhanyong Wan)'
import difflib
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO([email protected]): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# At least one command we exercise must not have the
# 'internal_skip_environment_and_ad_hoc_tests' argument.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
'--gtest_print_time',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
{}, [PROGRAM_PATH,
'--gtest_also_run_disabled_tests',
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
{'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
[PROGRAM_PATH,
'internal_skip_environment_and_ad_hoc_tests',
'--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveLocations(test_output):
"""Removes all file location info from a Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE_NAME:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output)
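# Hedged example of the substitution above (the path and message are
# hypothetical):
#
#   >>> RemoveLocations('src/gtest_output_test_.cc:123: Failure')
#   'gtest_output_test_.cc:#: Failure'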
def RemoveStackTraceDetails(output):
"""Removes all stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n',
'Stack trace: (omitted)\n\n', output)
def RemoveStackTraces(output):
"""Removes all traces of stack traces from a Google Test program's output."""
# *? means "find the shortest string that matches".
return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output)
def RemoveTime(output):
"""Removes all time information from a Google Test program's output."""
return re.sub(r'\(\d+ ms', '(? ms', output)
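# Hedged example (the test name and timing are hypothetical):
#
#   >>> RemoveTime('[       OK ] FooTest.Bar (3 ms)')
#   '[       OK ] FooTest.Bar (? ms)'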
def RemoveTypeInfoDetails(test_output):
"""Removes compiler-specific type info from Google Test program's output.
Args:
test_output: the output of a Google Test program.
Returns:
output with type information normalized to canonical form.
"""
# some compilers output the name of type 'unsigned int' as 'unsigned'
return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
"""Normalizes platform specific output details for easier comparison."""
if IS_WINDOWS:
# Removes the color information that is not present on Windows.
test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
# Changes failure message headers into the Windows format.
test_output = re.sub(r': Failure\n', r': error: ', test_output)
# Changes file(line_number) to file:line_number.
test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
return test_output
def RemoveTestCounts(output):
"""Removes test counts from a Google Test program's output."""
output = re.sub(r'\d+ tests?, listed below',
'? tests, listed below', output)
output = re.sub(r'\d+ FAILED TESTS',
'? FAILED TESTS', output)
output = re.sub(r'\d+ tests? from \d+ test cases?',
'? tests from ? test cases', output)
output = re.sub(r'\d+ tests? from ([a-zA-Z_])',
r'? tests from \1', output)
return re.sub(r'\d+ tests?\.', '? tests.', output)
def RemoveMatchingTests(test_output, pattern):
"""Removes output of specified tests from a Google Test program's output.
This function strips not only the beginning and the end of a test but also
all output in between.
Args:
test_output: A string containing the test output.
pattern: A regex string that matches names of test cases or
tests to remove.
Returns:
Contents of test_output with tests whose names match pattern removed.
"""
test_output = re.sub(
r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
pattern, pattern),
'',
test_output)
return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
"""Normalizes output (the output of gtest_output_test_.exe)."""
output = ToUnixLineEnding(output)
output = RemoveLocations(output)
output = RemoveStackTraceDetails(output)
output = RemoveTime(output)
return output
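# Hedged end-to-end example of the normalization pipeline above (the input
# line is hypothetical):
#
#   >>> NormalizeOutput('src/foo_test.cc:42: Failure (13 ms)\r\n')
#   'foo_test.cc:#: Failure (? ms)\n'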
def GetShellCommandOutput(env_cmd):
"""Runs a command in a sub-process, and returns its output in a string.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
Returns:
A string with the command's combined standard and diagnostic output.
"""
# Spawns cmd in a sub-process, and gets its standard I/O file objects.
# Set and save the environment properly.
environ = os.environ.copy()
environ.update(env_cmd[0])
p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
return p.output
def GetCommandOutput(env_cmd):
"""Runs a command and returns its output with all file location
info stripped off.
Args:
env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
environment variables to set, and element 1 is a string with
the command and any flags.
"""
# Disables exception pop-ups on Windows.
environ, cmdline = env_cmd
environ = dict(environ) # Ensures we are modifying a copy.
environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
return NormalizeOutput(GetShellCommandOutput((environ, cmdline)))
def GetOutputOfAllCommands():
"""Returns concatenated output from several representative commands."""
return (GetCommandOutput(COMMAND_WITH_COLOR) +
GetCommandOutput(COMMAND_WITH_TIME) +
GetCommandOutput(COMMAND_WITH_DISABLED) +
GetCommandOutput(COMMAND_WITH_SHARDING))
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
SUPPORTS_STACK_TRACES = False
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
SUPPORTS_TYPED_TESTS and
SUPPORTS_THREADS and
not IS_WINDOWS)
class GTestOutputTest(gtest_test_utils.TestCase):
def RemoveUnsupportedTests(self, test_output):
if not SUPPORTS_DEATH_TESTS:
test_output = RemoveMatchingTests(test_output, 'DeathTest')
if not SUPPORTS_TYPED_TESTS:
test_output = RemoveMatchingTests(test_output, 'TypedTest')
test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
if not SUPPORTS_THREADS:
test_output = RemoveMatchingTests(test_output,
'ExpectFailureWithThreadsTest')
test_output = RemoveMatchingTests(test_output,
'ScopedFakeTestPartResultReporterTest')
test_output = RemoveMatchingTests(test_output,
'WorksConcurrently')
if not SUPPORTS_STACK_TRACES:
test_output = RemoveStackTraces(test_output)
return test_output
def testOutput(self):
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r to appear in EOL
    # sequences when we read the golden file, irrespective of the operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
golden = ToUnixLineEnding(golden_file.read())
golden_file.close()
# We want the test to pass regardless of certain features being
# supported or not.
# We still have to remove type name specifics in all cases.
normalized_actual = RemoveTypeInfoDetails(output)
normalized_golden = RemoveTypeInfoDetails(golden)
if CAN_GENERATE_GOLDEN_FILE:
self.assertEqual(normalized_golden, normalized_actual,
'\n'.join(difflib.unified_diff(
normalized_golden.split('\n'),
normalized_actual.split('\n'),
'golden', 'actual')))
else:
normalized_actual = NormalizeToCurrentPlatform(
RemoveTestCounts(normalized_actual))
normalized_golden = NormalizeToCurrentPlatform(
RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
# This code is very handy when debugging golden file differences:
if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_actual.txt'), 'wb').write(
normalized_actual)
open(os.path.join(
gtest_test_utils.GetSourceDir(),
'_gtest_output_test_normalized_golden.txt'), 'wb').write(
normalized_golden)
self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
if CAN_GENERATE_GOLDEN_FILE:
output = GetOutputOfAllCommands()
golden_file = open(GOLDEN_PATH, 'wb')
golden_file.write(output)
golden_file.close()
else:
message = (
"""Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
sys.stderr.write(message)
sys.exit(1)
else:
gtest_test_utils.Main()
| bsd-3-clause |
cschenck/blender_sim | fluid_sim_deps/blender-2.69/2.69/scripts/addons/io_scene_3ds/__init__.py | 1 | 6950 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
bl_info = {
"name": "Autodesk 3DS format",
"author": "Bob Holcomb, Campbell Barton",
"blender": (2, 57, 0),
"location": "File > Import-Export",
"description": "Import-Export 3DS, meshes, uvs, materials, textures, "
"cameras & lamps",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/Autodesk_3DS",
"tracker_url": "",
"support": 'OFFICIAL',
"category": "Import-Export"}
if "bpy" in locals():
import imp
if "import_3ds" in locals():
imp.reload(import_3ds)
if "export_3ds" in locals():
imp.reload(export_3ds)
import bpy
from bpy.props import StringProperty, FloatProperty, BoolProperty, EnumProperty
from bpy_extras.io_utils import (ImportHelper,
ExportHelper,
axis_conversion,
)
class Import3DS(bpy.types.Operator, ImportHelper):
"""Import from 3DS file format (.3ds)"""
bl_idname = "import_scene.autodesk_3ds"
bl_label = 'Import 3DS'
bl_options = {'UNDO'}
filename_ext = ".3ds"
filter_glob = StringProperty(default="*.3ds", options={'HIDDEN'})
constrain_size = FloatProperty(
name="Size Constraint",
description="Scale the model by 10 until it reaches the "
"size constraint (0 to disable)",
min=0.0, max=1000.0,
soft_min=0.0, soft_max=1000.0,
default=10.0,
)
use_image_search = BoolProperty(
name="Image Search",
description="Search subdirectories for any associated images "
"(Warning, may be slow)",
default=True,
)
use_apply_transform = BoolProperty(
name="Apply Transform",
description="Workaround for object transformations "
"importing incorrectly",
default=True,
)
axis_forward = EnumProperty(
name="Forward",
items=(('X', "X Forward", ""),
('Y', "Y Forward", ""),
('Z', "Z Forward", ""),
('-X', "-X Forward", ""),
('-Y', "-Y Forward", ""),
('-Z', "-Z Forward", ""),
),
default='Y',
)
axis_up = EnumProperty(
name="Up",
items=(('X', "X Up", ""),
('Y', "Y Up", ""),
('Z', "Z Up", ""),
('-X', "-X Up", ""),
('-Y', "-Y Up", ""),
('-Z', "-Z Up", ""),
),
default='Z',
)
def execute(self, context):
from . import import_3ds
keywords = self.as_keywords(ignore=("axis_forward",
"axis_up",
"filter_glob",
))
global_matrix = axis_conversion(from_forward=self.axis_forward,
from_up=self.axis_up,
).to_4x4()
keywords["global_matrix"] = global_matrix
return import_3ds.load(self, context, **keywords)
class Export3DS(bpy.types.Operator, ExportHelper):
"""Export to 3DS file format (.3ds)"""
bl_idname = "export_scene.autodesk_3ds"
bl_label = 'Export 3DS'
filename_ext = ".3ds"
filter_glob = StringProperty(
default="*.3ds",
options={'HIDDEN'},
)
use_selection = BoolProperty(
name="Selection Only",
description="Export selected objects only",
default=False,
)
axis_forward = EnumProperty(
name="Forward",
items=(('X', "X Forward", ""),
('Y', "Y Forward", ""),
('Z', "Z Forward", ""),
('-X', "-X Forward", ""),
('-Y', "-Y Forward", ""),
('-Z', "-Z Forward", ""),
),
default='Y',
)
axis_up = EnumProperty(
name="Up",
items=(('X', "X Up", ""),
('Y', "Y Up", ""),
('Z', "Z Up", ""),
('-X', "-X Up", ""),
('-Y', "-Y Up", ""),
('-Z', "-Z Up", ""),
),
default='Z',
)
def execute(self, context):
from . import export_3ds
keywords = self.as_keywords(ignore=("axis_forward",
"axis_up",
"filter_glob",
"check_existing",
))
global_matrix = axis_conversion(to_forward=self.axis_forward,
to_up=self.axis_up,
).to_4x4()
keywords["global_matrix"] = global_matrix
return export_3ds.save(self, context, **keywords)
# Add to a menu
def menu_func_export(self, context):
self.layout.operator(Export3DS.bl_idname, text="3D Studio (.3ds)")
def menu_func_import(self, context):
self.layout.operator(Import3DS.bl_idname, text="3D Studio (.3ds)")
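# Illustrative usage sketch: once register() has run inside Blender, the
# operators above can be driven from scripts. The file path is hypothetical.
def _example_scripted_import(filepath="/tmp/model.3ds"):
    bpy.ops.import_scene.autodesk_3ds(filepath=filepath)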
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_func_import)
bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_func_import)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
# NOTES:
# why add 1 extra vertex? and remove it when done? -
# "Answer - eekadoodle - would need to re-order UV's without this since face
# order isnt always what we give blender, BMesh will solve :D"
#
# disabled scaling to size, this requires exposing bb (easy) and understanding
# how it works (needs some time)
if __name__ == "__main__":
register()
| gpl-3.0 |
HyperloopTeam/FullOpenMDAO | cantera-2.0.2/interfaces/python/MixMaster/Units/unit.py | 1 | 2833 | import operator
class unit:
_zero = (0,) * 7
_negativeOne = (-1, ) * 7
_labels = ('m', 'kg', 's', 'A', 'K', 'mol', 'cd')
def __init__(self, value, derivation):
self.value = value
self.derivation = derivation
return
def __add__(self, other):
if not self.derivation == other.derivation:
            raise IncompatibleUnits(self, other)
return unit(self.value + other.value, self.derivation)
def __sub__(self, other):
if not self.derivation == other.derivation:
            raise IncompatibleUnits(self, other)
return unit(self.value - other.value, self.derivation)
def __mul__(self, other):
if type(other) == type(0) or type(other) == type(0.0):
return unit(other*self.value, self.derivation)
value = self.value * other.value
derivation = tuple(map(operator.add, self.derivation, other.derivation))
return unit(value, derivation)
def __div__(self, other):
if type(other) == type(0) or type(other) == type(0.0):
return unit(self.value/other, self.derivation)
value = self.value / other.value
derivation = tuple(map(operator.sub, self.derivation, other.derivation))
return unit(value, derivation)
def __pow__(self, other):
if type(other) != type(0) and type(other) != type(0.0):
raise BadOperation
value = self.value ** other
derivation = tuple(map(operator.mul, [other]*7, self.derivation))
return unit(value, derivation)
def __pos__(self): return self
def __neg__(self): return unit(-self.value, self.derivation)
def __abs__(self): return unit(abs(self.value), self.derivation)
def __invert__(self):
value = 1./self.value
derivation = tuple(map(operator.mul, self._negativeOne, self.derivation))
return unit(value, derivation)
def __rmul__(self, other):
return unit.__mul__(self, other)
def __rdiv__(self, other):
if type(other) != type(0) and type(other) != type(0.0):
raise BadOperation(self, other)
value = other/self.value
derivation = tuple(map(operator.mul, self._negativeOne, self.derivation))
return unit(value, derivation)
def __float__(self):
return self.value
#if self.derivation == self._zero: return self.value
#raise BadConversion(self)
def __str__(self):
str = "%g" % self.value
for i in range(0, 7):
exponent = self.derivation[i]
if exponent == 0: continue
if exponent == 1:
str = str + " %s" % (self._labels[i])
else:
str = str + " %s^%d" % (self._labels[i], exponent)
return str
dimensionless = unit(1, unit._zero)
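# Illustrative usage sketch; the base-unit tuples follow the _labels order
# ('m', 'kg', 's', 'A', 'K', 'mol', 'cd') defined in the class above.
def _unit_demo():
    metre = unit(1.0, (1, 0, 0, 0, 0, 0, 0)) # length
    second = unit(1.0, (0, 0, 1, 0, 0, 0, 0)) # time
    velocity = metre / second # exponents subtract
    return str(velocity) # -> '1 m s^-1'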
| gpl-2.0 |
laiqiqi886/kbengine | kbe/res/scripts/common/Lib/lib2to3/pytree.py | 71 | 28305 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""
Python parse tree definitions.
This is a very concrete parse tree; we need to keep every token and
even the comments and whitespace between tokens.
There's also a pattern matching implementation here.
"""
__author__ = "Guido van Rossum <[email protected]>"
import sys
import warnings
from io import StringIO
HUGE = 0x7FFFFFFF # maximum repeat count, default max
_type_reprs = {}
def type_repr(type_num):
global _type_reprs
if not _type_reprs:
from .pygram import python_symbols
# printing tokens is possible but not as useful
# from .pgen2 import token // token.__dict__.items():
for name, val in python_symbols.__dict__.items():
if type(val) == int: _type_reprs[val] = name
return _type_reprs.setdefault(type_num, type_num)
class Base(object):
"""
Abstract base class for Node and Leaf.
This provides some default functionality and boilerplate using the
template pattern.
A node may be a subnode of at most one parent.
"""
# Default values for instance variables
type = None # int: token number (< 256) or symbol number (>= 256)
parent = None # Parent node pointer, or None
children = () # Tuple of subnodes
was_changed = False
was_checked = False
def __new__(cls, *args, **kwds):
"""Constructor that prevents Base from being instantiated."""
assert cls is not Base, "Cannot instantiate Base"
return object.__new__(cls)
def __eq__(self, other):
"""
Compare two nodes for equality.
This calls the method _eq().
"""
if self.__class__ is not other.__class__:
return NotImplemented
return self._eq(other)
__hash__ = None # For Py3 compatibility.
def __ne__(self, other):
"""
Compare two nodes for inequality.
This calls the method _eq().
"""
if self.__class__ is not other.__class__:
return NotImplemented
return not self._eq(other)
def _eq(self, other):
"""
Compare two nodes for equality.
This is called by __eq__ and __ne__. It is only called if the two nodes
have the same type. This must be implemented by the concrete subclass.
Nodes should be considered equal if they have the same structure,
ignoring the prefix string and other context information.
"""
raise NotImplementedError
def clone(self):
"""
Return a cloned (deep) copy of self.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def post_order(self):
"""
Return a post-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def pre_order(self):
"""
Return a pre-order iterator for the tree.
This must be implemented by the concrete subclass.
"""
raise NotImplementedError
def replace(self, new):
"""Replace this node with a new one in the parent."""
assert self.parent is not None, str(self)
assert new is not None
if not isinstance(new, list):
new = [new]
l_children = []
found = False
for ch in self.parent.children:
if ch is self:
assert not found, (self.parent.children, self, new)
if new is not None:
l_children.extend(new)
found = True
else:
l_children.append(ch)
assert found, (self.children, self, new)
self.parent.changed()
self.parent.children = l_children
for x in new:
x.parent = self.parent
self.parent = None
def get_lineno(self):
"""Return the line number which generated the invocant node."""
node = self
while not isinstance(node, Leaf):
if not node.children:
return
node = node.children[0]
return node.lineno
def changed(self):
if self.parent:
self.parent.changed()
self.was_changed = True
def remove(self):
"""
Remove the node from the tree. Returns the position of the node in its
parent's children before it was removed.
"""
if self.parent:
for i, node in enumerate(self.parent.children):
if node is self:
self.parent.changed()
del self.parent.children[i]
self.parent = None
return i
@property
def next_sibling(self):
"""
The node immediately following the invocant in their parent's children
        list. If the invocant does not have a next sibling, it is None.
"""
if self.parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
try:
return self.parent.children[i+1]
except IndexError:
return None
@property
def prev_sibling(self):
"""
The node immediately preceding the invocant in their parent's children
list. If the invocant does not have a previous sibling, it is None.
"""
if self.parent is None:
return None
# Can't use index(); we need to test by identity
for i, child in enumerate(self.parent.children):
if child is self:
if i == 0:
return None
return self.parent.children[i-1]
def leaves(self):
for child in self.children:
yield from child.leaves()
def depth(self):
if self.parent is None:
return 0
return 1 + self.parent.depth()
def get_suffix(self):
"""
Return the string immediately following the invocant node. This is
effectively equivalent to node.next_sibling.prefix
"""
next_sib = self.next_sibling
if next_sib is None:
return ""
return next_sib.prefix
    if sys.version_info < (3, 0):
        def __str__(self):
            # Python 2 only: delegate to __unicode__ (defined by Node/Leaf)
            # rather than str(self), which would recurse into this method.
            return unicode(self).encode("ascii")
class Node(Base):
"""Concrete implementation for interior nodes."""
    def __init__(self, type, children,
context=None,
prefix=None,
fixers_applied=None):
"""
Initializer.
Takes a type constant (a symbol number >= 256), a sequence of
child nodes, and an optional context keyword argument.
As a side effect, the parent pointers of the children are updated.
"""
assert type >= 256, type
self.type = type
self.children = list(children)
for ch in self.children:
assert ch.parent is None, repr(ch)
ch.parent = self
if prefix is not None:
self.prefix = prefix
if fixers_applied:
self.fixers_applied = fixers_applied[:]
else:
self.fixers_applied = None
def __repr__(self):
"""Return a canonical string representation."""
return "%s(%s, %r)" % (self.__class__.__name__,
type_repr(self.type),
self.children)
def __unicode__(self):
"""
Return a pretty string representation.
This reproduces the input source exactly.
"""
return "".join(map(str, self.children))
if sys.version_info > (3, 0):
__str__ = __unicode__
def _eq(self, other):
"""Compare two nodes for equality."""
return (self.type, self.children) == (other.type, other.children)
def clone(self):
"""Return a cloned (deep) copy of self."""
return Node(self.type, [ch.clone() for ch in self.children],
fixers_applied=self.fixers_applied)
def post_order(self):
"""Return a post-order iterator for the tree."""
for child in self.children:
yield from child.post_order()
yield self
def pre_order(self):
"""Return a pre-order iterator for the tree."""
yield self
for child in self.children:
yield from child.pre_order()
def _prefix_getter(self):
"""
The whitespace and comments preceding this node in the input.
"""
if not self.children:
return ""
return self.children[0].prefix
def _prefix_setter(self, prefix):
if self.children:
self.children[0].prefix = prefix
prefix = property(_prefix_getter, _prefix_setter)
def set_child(self, i, child):
"""
Equivalent to 'node.children[i] = child'. This method also sets the
child's parent attribute appropriately.
"""
child.parent = self
self.children[i].parent = None
self.children[i] = child
self.changed()
def insert_child(self, i, child):
"""
Equivalent to 'node.children.insert(i, child)'. This method also sets
the child's parent attribute appropriately.
"""
child.parent = self
self.children.insert(i, child)
self.changed()
def append_child(self, child):
"""
Equivalent to 'node.children.append(child)'. This method also sets the
child's parent attribute appropriately.
"""
child.parent = self
self.children.append(child)
self.changed()
class Leaf(Base):
"""Concrete implementation for leaf nodes."""
# Default values for instance variables
_prefix = "" # Whitespace and comments preceding this token in the input
lineno = 0 # Line where this token starts in the input
    column = 0 # Column where this token starts in the input
def __init__(self, type, value,
context=None,
prefix=None,
fixers_applied=[]):
"""
Initializer.
Takes a type constant (a token number < 256), a string value, and an
optional context keyword argument.
"""
assert 0 <= type < 256, type
if context is not None:
self._prefix, (self.lineno, self.column) = context
self.type = type
self.value = value
if prefix is not None:
self._prefix = prefix
self.fixers_applied = fixers_applied[:]
def __repr__(self):
"""Return a canonical string representation."""
return "%s(%r, %r)" % (self.__class__.__name__,
self.type,
self.value)
def __unicode__(self):
"""
Return a pretty string representation.
This reproduces the input source exactly.
"""
return self.prefix + str(self.value)
if sys.version_info > (3, 0):
__str__ = __unicode__
def _eq(self, other):
"""Compare two nodes for equality."""
return (self.type, self.value) == (other.type, other.value)
def clone(self):
"""Return a cloned (deep) copy of self."""
return Leaf(self.type, self.value,
(self.prefix, (self.lineno, self.column)),
fixers_applied=self.fixers_applied)
def leaves(self):
yield self
def post_order(self):
"""Return a post-order iterator for the tree."""
yield self
def pre_order(self):
"""Return a pre-order iterator for the tree."""
yield self
def _prefix_getter(self):
"""
The whitespace and comments preceding this token in the input.
"""
return self._prefix
def _prefix_setter(self, prefix):
self.changed()
self._prefix = prefix
prefix = property(_prefix_getter, _prefix_setter)
def convert(gr, raw_node):
"""
Convert raw node information to a Node or Leaf instance.
This is passed to the parser driver which calls it whenever a reduction of a
    grammar rule produces a new complete node, so that the tree is built
strictly bottom-up.
"""
type, value, context, children = raw_node
if children or type in gr.number2symbol:
# If there's exactly one child, return that child instead of
# creating a new node.
if len(children) == 1:
return children[0]
return Node(type, children, context=context)
else:
return Leaf(type, value, context=context)
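# Illustrative sketch; the grammar stub and the token/symbol numbers are
# hypothetical, chosen only to exercise the two branches above.
def _convert_demo():
    class _FakeGrammar(object):
        number2symbol = {256: 'demo_symbol'}
    leaf = Leaf(1, 'x')
    # A reduction with exactly one child collapses to that child...
    collapsed = convert(_FakeGrammar(), (256, None, None, [leaf]))
    # ...while a token row becomes a fresh Leaf.
    token = convert(_FakeGrammar(), (1, 'x', ('', (1, 0)), None))
    return collapsed is leaf, isinstance(token, Leaf)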
class BasePattern(object):
"""
A pattern is a tree matching pattern.
It looks for a specific node type (token or symbol), and
optionally for a specific content.
This is an abstract base class. There are three concrete
subclasses:
- LeafPattern matches a single leaf node;
- NodePattern matches a single node (usually non-leaf);
- WildcardPattern matches a sequence of nodes of variable length.
"""
# Defaults for instance variables
type = None # Node type (token if < 256, symbol if >= 256)
content = None # Optional content matching pattern
name = None # Optional name used to store match in results dict
def __new__(cls, *args, **kwds):
"""Constructor that prevents BasePattern from being instantiated."""
assert cls is not BasePattern, "Cannot instantiate BasePattern"
return object.__new__(cls)
def __repr__(self):
args = [type_repr(self.type), self.content, self.name]
while args and args[-1] is None:
del args[-1]
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
def optimize(self):
"""
A subclass can define this as a hook for optimizations.
Returns either self or another node with the same effect.
"""
return self
def match(self, node, results=None):
"""
Does this pattern exactly match a node?
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
Default implementation for non-wildcard patterns.
"""
if self.type is not None and node.type != self.type:
return False
if self.content is not None:
r = None
if results is not None:
r = {}
if not self._submatch(node, r):
return False
if r:
results.update(r)
if results is not None and self.name:
results[self.name] = node
return True
def match_seq(self, nodes, results=None):
"""
Does this pattern exactly match a sequence of nodes?
Default implementation for non-wildcard patterns.
"""
if len(nodes) != 1:
return False
return self.match(nodes[0], results)
def generate_matches(self, nodes):
"""
Generator yielding all matches for this pattern.
Default implementation for non-wildcard patterns.
"""
r = {}
if nodes and self.match(nodes[0], r):
yield 1, r
class LeafPattern(BasePattern):
def __init__(self, type=None, content=None, name=None):
"""
Initializer. Takes optional type, content, and name.
The type, if given must be a token type (< 256). If not given,
this matches any *leaf* node; the content may still be required.
The content, if given, must be a string.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert 0 <= type < 256, type
if content is not None:
assert isinstance(content, str), repr(content)
self.type = type
self.content = content
self.name = name
def match(self, node, results=None):
"""Override match() to insist on a leaf node."""
if not isinstance(node, Leaf):
return False
return BasePattern.match(self, node, results)
def _submatch(self, node, results=None):
"""
Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
return self.content == node.value
class NodePattern(BasePattern):
wildcards = False
def __init__(self, type=None, content=None, name=None):
"""
Initializer. Takes optional type, content, and name.
The type, if given, must be a symbol type (>= 256). If the
type is None this matches *any* single node (leaf or not),
        except if content is not None, in which case it only matches
non-leaf nodes that also match the content pattern.
The content, if not None, must be a sequence of Patterns that
must match the node's children exactly. If the content is
given, the type must not be None.
If a name is given, the matching node is stored in the results
dict under that key.
"""
if type is not None:
assert type >= 256, type
if content is not None:
assert not isinstance(content, str), repr(content)
content = list(content)
for i, item in enumerate(content):
assert isinstance(item, BasePattern), (i, item)
if isinstance(item, WildcardPattern):
self.wildcards = True
self.type = type
self.content = content
self.name = name
def _submatch(self, node, results=None):
"""
Match the pattern's content to the node's children.
This assumes the node type matches and self.content is not None.
Returns True if it matches, False if not.
If results is not None, it must be a dict which will be
updated with the nodes matching named subpatterns.
When returning False, the results dict may still be updated.
"""
if self.wildcards:
for c, r in generate_matches(self.content, node.children):
if c == len(node.children):
if results is not None:
results.update(r)
return True
return False
if len(self.content) != len(node.children):
return False
for subpattern, child in zip(self.content, node.children):
if not subpattern.match(child, results):
return False
return True
class WildcardPattern(BasePattern):
"""
A wildcard pattern can match zero or more nodes.
This has all the flexibility needed to implement patterns like:
.* .+ .? .{m,n}
(a b c | d e | f)
(...)* (...)+ (...)? (...){m,n}
except it always uses non-greedy matching.
"""
def __init__(self, content=None, min=0, max=HUGE, name=None):
"""
Initializer.
Args:
content: optional sequence of subsequences of patterns;
if absent, matches one node;
if present, each subsequence is an alternative [*]
min: optional minimum number of times to match, default 0
max: optional maximum number of times to match, default HUGE
name: optional name assigned to this match
[*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
equivalent to (a b c | d e | f g h); if content is None,
this is equivalent to '.' in regular expression terms.
The min and max parameters work as follows:
min=0, max=maxint: .*
min=1, max=maxint: .+
min=0, max=1: .?
min=1, max=1: .
If content is not None, replace the dot with the parenthesized
list of alternatives, e.g. (a b c | d e | f g h)*
"""
assert 0 <= min <= max <= HUGE, (min, max)
if content is not None:
content = tuple(map(tuple, content)) # Protect against alterations
# Check sanity of alternatives
assert len(content), repr(content) # Can't have zero alternatives
for alt in content:
                assert len(alt), repr(alt) # Can't have empty alternatives
self.content = content
self.min = min
self.max = max
self.name = name
def optimize(self):
"""Optimize certain stacked wildcard patterns."""
subpattern = None
if (self.content is not None and
len(self.content) == 1 and len(self.content[0]) == 1):
subpattern = self.content[0][0]
if self.min == 1 and self.max == 1:
if self.content is None:
return NodePattern(name=self.name)
if subpattern is not None and self.name == subpattern.name:
return subpattern.optimize()
if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
subpattern.min <= 1 and self.name == subpattern.name):
return WildcardPattern(subpattern.content,
self.min*subpattern.min,
self.max*subpattern.max,
subpattern.name)
return self
def match(self, node, results=None):
"""Does this pattern exactly match a node?"""
return self.match_seq([node], results)
def match_seq(self, nodes, results=None):
"""Does this pattern exactly match a sequence of nodes?"""
for c, r in self.generate_matches(nodes):
if c == len(nodes):
if results is not None:
results.update(r)
if self.name:
results[self.name] = list(nodes)
return True
return False
def generate_matches(self, nodes):
"""
Generator yielding matches for a sequence of nodes.
Args:
nodes: sequence of nodes
Yields:
(count, results) tuples where:
count: the match comprises nodes[:count];
results: dict containing named submatches.
"""
if self.content is None:
# Shortcut for special case (see __init__.__doc__)
for count in range(self.min, 1 + min(len(nodes), self.max)):
r = {}
if self.name:
r[self.name] = nodes[:count]
yield count, r
elif self.name == "bare_name":
yield self._bare_name_matches(nodes)
else:
# The reason for this is that hitting the recursion limit usually
# results in some ugly messages about how RuntimeErrors are being
# ignored. We only have to do this on CPython, though, because other
# implementations don't have this nasty bug in the first place.
if hasattr(sys, "getrefcount"):
save_stderr = sys.stderr
sys.stderr = StringIO()
try:
for count, r in self._recursive_matches(nodes, 0):
if self.name:
r[self.name] = nodes[:count]
yield count, r
except RuntimeError:
# We fall back to the iterative pattern matching scheme if the recursive
# scheme hits the recursion limit.
for count, r in self._iterative_matches(nodes):
if self.name:
r[self.name] = nodes[:count]
yield count, r
finally:
if hasattr(sys, "getrefcount"):
sys.stderr = save_stderr
def _iterative_matches(self, nodes):
"""Helper to iteratively yield the matches."""
nodelen = len(nodes)
if 0 >= self.min:
yield 0, {}
results = []
# generate matches that use just one alt from self.content
for alt in self.content:
for c, r in generate_matches(alt, nodes):
yield c, r
results.append((c, r))
# for each match, iterate down the nodes
while results:
new_results = []
for c0, r0 in results:
# stop if the entire set of nodes has been matched
if c0 < nodelen and c0 <= self.max:
for alt in self.content:
for c1, r1 in generate_matches(alt, nodes[c0:]):
if c1 > 0:
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
new_results.append((c0 + c1, r))
results = new_results
def _bare_name_matches(self, nodes):
"""Special optimized matcher for bare_name."""
count = 0
r = {}
done = False
max = len(nodes)
while not done and count < max:
done = True
for leaf in self.content:
if leaf[0].match(nodes[count], r):
count += 1
done = False
break
r[self.name] = nodes[:count]
return count, r
def _recursive_matches(self, nodes, count):
"""Helper to recursively yield the matches."""
assert self.content is not None
if count >= self.min:
yield 0, {}
if count < self.max:
for alt in self.content:
for c0, r0 in generate_matches(alt, nodes):
for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
class NegatedPattern(BasePattern):
def __init__(self, content=None):
"""
Initializer.
The argument is either a pattern or None. If it is None, this
only matches an empty sequence (effectively '$' in regex
lingo). If it is not None, this matches whenever the argument
pattern doesn't have any matches.
"""
if content is not None:
assert isinstance(content, BasePattern), repr(content)
self.content = content
def match(self, node):
# We never match a node in its entirety
return False
def match_seq(self, nodes):
# We only match an empty sequence of nodes in its entirety
return len(nodes) == 0
def generate_matches(self, nodes):
if self.content is None:
# Return a match if there is an empty sequence
if len(nodes) == 0:
yield 0, {}
else:
# Return a match if the argument pattern has no matches
for c, r in self.content.generate_matches(nodes):
return
yield 0, {}
def generate_matches(patterns, nodes):
"""
Generator yielding matches for a sequence of patterns and nodes.
Args:
patterns: a sequence of patterns
nodes: a sequence of nodes
Yields:
(count, results) tuples where:
count: the entire sequence of patterns matches nodes[:count];
results: dict containing named submatches.
"""
if not patterns:
yield 0, {}
else:
p, rest = patterns[0], patterns[1:]
for c0, r0 in p.generate_matches(nodes):
if not rest:
yield c0, r0
else:
for c1, r1 in generate_matches(rest, nodes[c0:]):
r = {}
r.update(r0)
r.update(r1)
yield c0 + c1, r
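# Illustrative usage sketch; 1 and 256 are hypothetical stand-ins for a
# NAME token number and the first symbol number of a real grammar.
def _pytree_demo():
    lhs = Leaf(1, 'x')
    op = Leaf(1, '=', prefix=' ')
    rhs = Leaf(1, 'y', prefix=' ')
    node = Node(256, [lhs, op, rhs])
    return str(node) # reproduces the source exactly: 'x = y'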
| lgpl-3.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-compute/azure/mgmt/compute/v2016_03_30/models/windows_configuration_py3.py | 1 | 2719 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class WindowsConfiguration(Model):
"""Specifies Windows operating system settings on the virtual machine.
:param provision_vm_agent: Indicates whether virtual machine agent should
be provisioned on the virtual machine. <br><br> When this property is not
specified in the request body, default behavior is to set it to true.
This will ensure that VM Agent is installed on the VM so that extensions
can be added to the VM later.
:type provision_vm_agent: bool
:param enable_automatic_updates: Indicates whether virtual machine is
enabled for automatic updates.
:type enable_automatic_updates: bool
:param time_zone: Specifies the time zone of the virtual machine. e.g.
"Pacific Standard Time"
:type time_zone: str
:param additional_unattend_content: Specifies additional base-64 encoded
XML formatted information that can be included in the Unattend.xml file,
which is used by Windows Setup.
:type additional_unattend_content:
list[~azure.mgmt.compute.v2016_03_30.models.AdditionalUnattendContent]
:param win_rm: Specifies the Windows Remote Management listeners. This
enables remote Windows PowerShell.
:type win_rm: ~azure.mgmt.compute.v2016_03_30.models.WinRMConfiguration
"""
_attribute_map = {
'provision_vm_agent': {'key': 'provisionVMAgent', 'type': 'bool'},
'enable_automatic_updates': {'key': 'enableAutomaticUpdates', 'type': 'bool'},
'time_zone': {'key': 'timeZone', 'type': 'str'},
'additional_unattend_content': {'key': 'additionalUnattendContent', 'type': '[AdditionalUnattendContent]'},
'win_rm': {'key': 'winRM', 'type': 'WinRMConfiguration'},
}
def __init__(self, *, provision_vm_agent: bool=None, enable_automatic_updates: bool=None, time_zone: str=None, additional_unattend_content=None, win_rm=None, **kwargs) -> None:
super(WindowsConfiguration, self).__init__(**kwargs)
self.provision_vm_agent = provision_vm_agent
self.enable_automatic_updates = enable_automatic_updates
self.time_zone = time_zone
self.additional_unattend_content = additional_unattend_content
self.win_rm = win_rm
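# Illustrative construction sketch; the values are hypothetical, with the
# time zone string taken from the docstring above.
def _windows_configuration_example():
    return WindowsConfiguration(
        provision_vm_agent=True,
        enable_automatic_updates=True,
        time_zone='Pacific Standard Time',
    )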
| mit |
ppwwyyxx/tensorflow | tensorflow/python/keras/utils/version_utils.py | 3 | 2667 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities for Keras classes with v1 and v2 versions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.util import lazy_loader
# TODO(b/134426265): Switch back to single-quotes once the issue
# with copybara is fixed.
# pylint: disable=g-inconsistent-quotes
training = lazy_loader.LazyLoader(
"training", globals(),
"tensorflow.python.keras.engine.training")
training_v1 = lazy_loader.LazyLoader(
"training_v1", globals(),
"tensorflow.python.keras.engine.training_v1")
# pylint: enable=g-inconsistent-quotes
# TODO(omalleyt): Extend to Layer class once Layer class is split.
class VersionSelector(object):
"""Chooses between Keras v1 and v2 Model class."""
def __new__(cls, *args, **kwargs): # pylint: disable=unused-argument
new_cls = swap_class(cls, training.Model, training_v1.Model)
return object.__new__(new_cls)
def swap_class(cls, v2_cls, v1_cls):
"""Swaps in v2_cls or v1_cls depending on graph mode."""
if cls == object:
return cls
if cls in (v2_cls, v1_cls):
if ops.executing_eagerly_outside_functions():
return v2_cls
return v1_cls
# Recursively search superclasses to swap in the right Keras class.
cls.__bases__ = tuple(
swap_class(base, v2_cls, v1_cls) for base in cls.__bases__)
return cls
def disallow_legacy_graph(cls_name, method_name):
if not ops.executing_eagerly_outside_functions():
error_msg = (
"Calling `{cls_name}.{method_name}` in graph mode is not supported "
"when the `{cls_name}` instance was constructed with eager mode "
"enabled. Please construct your `{cls_name}` instance in graph mode or"
" call `{cls_name}.{method_name}` with eager mode enabled.")
error_msg = error_msg.format(cls_name=cls_name, method_name=method_name)
raise ValueError(error_msg)
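# Illustrative sketch with toy classes showing how swap_class rewrites a
# subclass's bases; the real consumers are the Keras Model classes above.
def _swap_class_demo():
  class V1(object):
    pass
  class V2(object):
    pass
  class Current(V2):  # hypothetical user class built on the v2 base
    pass
  swapped = swap_class(Current, V2, V1)  # resolves per graph/eager mode
  return swapped.__bases__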
| apache-2.0 |
matiboy/django_safari_notifications | django_safari_notifications/apps.py | 1 | 1111 | # -*- coding: utf-8
from django.apps import AppConfig
import logging
class DjangoSafariNotificationsConfig(AppConfig):
name = 'django_safari_notifications'
verbose_name = 'Safari Push Notifications'
version = 'v1'
service_base = 'push'
userinfo_key = 'userinfo'
logger = logging.getLogger('django_safari_notifications')
# Provide path to a pem file containing the certificate, the key as well as Apple's WWDRCA
cert = 'path/to/your/cert'
passphrase = 'pass:xxxx' # this will be used with -passin in the openssl command so could be with pass, env etc
# If single site, just set these values. Otherwise create Domain entries
website_conf = None
# sample single site: do not include the authenticationToken
"""
website_conf = {
"websiteName": "Bay Airlines",
"websitePushID": "web.com.example.domain",
"allowedDomains": ["http://domain.example.com"],
"urlFormatString": "http://domain.example.com/%@/?flight=%@",
"webServiceURL": "https://example.com/push"
}
"""
iconset_folder = '/path/to/your/iconset'
| mit |
TomSkelly/MatchAnnot | showAnnot.py | 1 | 2299 | #!/usr/bin/env python
# Read annotation file, print selected stuff in human-readable format.
# AUTHOR: Tom Skelly ([email protected])
import os
import sys
import optparse
import re # regular expressions
import cPickle as pickle
from tt_log import logger
import Annotations as anno
VERSION = '20150417.01'
def main ():
logger.debug('version %s starting' % VERSION)
opt, args = getParms()
if opt.gtfpickle is not None:
handle = open (opt.gtfpickle, 'r')
pk = pickle.Unpickler (handle)
annotList = pk.load()
handle.close()
else:
annotList = anno.AnnotationList (opt.gtf)
geneList = annotList.getGene (opt.gene)
if geneList is None:
print 'gene %s not found in annotations' % opt.gene
elif len(geneList) != 1:
print 'there are %d occurrences of gene %s in annotations' % (len(geneList), opt.gene)
else:
geneEnt = geneList[0]
print 'gene: ',
printEnt (geneEnt)
for transEnt in geneEnt.getChildren():
print '\ntr: ',
printTran (transEnt)
for exonEnt in transEnt.getChildren():
print 'exon: ',
printEnt (exonEnt)
logger.debug('finished')
return
def printEnt (ent):
print '%-15s %9d %9d %6d' % (ent.name, ent.start, ent.end, ent.end-ent.start+1)
return
def printTran (ent):
print '%-15s %9d %9d %6d' % (ent.name, ent.start, ent.end, ent.end-ent.start+1),
if hasattr (ent, 'startcodon'):
print ' start: %9d' % ent.startcodon,
if hasattr (ent, 'stopcodon'):
print ' stop: %9d' % ent.stopcodon,
print
return
def getParms (): # use default input sys.argv[1:]
    parser = optparse.OptionParser(usage='%prog [options]')
parser.add_option ('--gtf', help='annotations in gtf format')
parser.add_option ('--gtfpickle', help='annotations in pickled gtf format')
parser.add_option ('--gene', help='gene to print')
parser.set_defaults (gtf=None,
gtfpickle=None,
gene=None,
)
opt, args = parser.parse_args()
return opt, args
if __name__ == "__main__":
main()
| gpl-3.0 |
gugarosa/app_h | node_modules/node-ninja/gyp/pylib/gyp/MSVSNew.py | 1835 | 12124 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
# Initialize random number generator
random.seed()
# GUIDs for project types
ENTRY_TYPE_GUIDS = {
'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
"""Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
A GUID-line string calculated from the name and seed.
This generates something which looks like a GUID, but depends only on the
name and seed. This means the same name/seed will always generate the same
GUID, so that projects and solutions which refer to each other can explicitly
determine the GUID to refer to explicitly. It also means that the GUID will
not change when the project for a target is rebuilt.
"""
# Calculate a MD5 signature for the seed and name.
d = _new_md5(str(seed) + str(name)).hexdigest().upper()
# Convert most of the signature to GUID form (discard the rest)
guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
+ '-' + d[20:32] + '}')
return guid
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
def __cmp__(self, other):
# Sort by name then guid (so things are in order on vs2008).
return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
"""Folder in a Visual Studio project or solution."""
def __init__(self, path, name = None, entries = None,
guid = None, items = None):
"""Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
"""
if name:
self.name = name
else:
# Use last layer.
self.name = os.path.basename(path)
self.path = path
self.guid = guid
# Copy passed lists (or set to empty lists)
self.entries = sorted(list(entries or []))
self.items = list(items or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
def get_guid(self):
if self.guid is None:
# Use consistent guids for folders (so things don't regenerate).
self.guid = MakeGuid(self.path, seed='msvs_folder')
return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
"""Visual Studio project."""
def __init__(self, path, name = None, dependencies = None, guid = None,
spec = None, build_file = None, config_platform_overrides = None,
fixpath_prefix = None):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None
def set_dependencies(self, dependencies):
self.dependencies = list(dependencies or [])
def get_guid(self):
if self.guid is None:
# Set GUID from path
# TODO(rspangler): This is fragile.
# 1. We can't just use the project filename sans path, since there could
# be multiple projects with the same base name (for example,
# foo/unittest.vcproj and bar/unittest.vcproj).
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
# GUID is the same whether it's included from base/base.sln or
# foo/bar/baz/baz.sln.
# 3. The GUID needs to be the same each time this builder is invoked, so
# that we don't need to rebuild the solution when the project changes.
# 4. We should be able to handle pre-built project files by reading the
# GUID from the files.
self.guid = MakeGuid(self.name)
return self.guid
def set_msbuild_toolset(self, msbuild_toolset):
self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution(object):
"""Visual Studio solution."""
def __init__(self, path, version, entries=None, variants=None,
websiteProperties=True):
"""Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
variants: List of build variant strings. If none, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
"""
self.path = path
self.websiteProperties = websiteProperties
self.version = version
# Copy passed lists (or set to empty lists)
self.entries = list(entries or [])
if variants:
# Copy passed list
self.variants = variants[:]
else:
# Use default
self.variants = ['Debug|Win32', 'Release|Win32']
# TODO(rspangler): Need to be able to handle a mapping of solution config
# to project config. Should we be able to handle variants being a dict,
# or add a separate variant_map variable? If it's a dict, we can't
# guarantee the order of variants since dict keys aren't ordered.
# TODO(rspangler): Automatically write to disk for now; should delay until
# node-evaluation time.
self.Write()
def Write(self, writer=gyp.common.WriteOnDiff):
"""Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
"""
# Walk the entry tree and collect all the folders and projects.
all_entries = set()
entries_to_check = self.entries[:]
while entries_to_check:
e = entries_to_check.pop(0)
# If this entry has been visited, nothing to do.
if e in all_entries:
continue
all_entries.add(e)
# If this is a folder, check its entries too.
if isinstance(e, MSVSFolder):
entries_to_check += e.entries
all_entries = sorted(all_entries)
# Open file and print header
f = writer(self.path)
f.write('Microsoft Visual Studio Solution File, '
'Format Version %s\r\n' % self.version.SolutionVersion())
f.write('# %s\r\n' % self.version.Description())
# Project entries
sln_root = os.path.split(self.path)[0]
for e in all_entries:
relative_path = gyp.common.RelativePath(e.path, sln_root)
# msbuild does not accept an empty folder_name.
# use '.' in case relative_path is empty.
folder_name = relative_path.replace('/', '\\') or '.'
f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
e.entry_type_guid, # Entry type GUID
e.name, # Folder name
folder_name, # Folder name (again)
e.get_guid(), # Entry GUID
))
# TODO(rspangler): Need a way to configure this stuff
if self.websiteProperties:
f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
'\tEndProjectSection\r\n')
if isinstance(e, MSVSFolder):
if e.items:
f.write('\tProjectSection(SolutionItems) = preProject\r\n')
for i in e.items:
f.write('\t\t%s = %s\r\n' % (i, i))
f.write('\tEndProjectSection\r\n')
if isinstance(e, MSVSProject):
if e.dependencies:
f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
for d in e.dependencies:
f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
f.write('\tEndProjectSection\r\n')
f.write('EndProject\r\n')
# Global section
f.write('Global\r\n')
# Configurations (variants)
f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
for v in self.variants:
f.write('\t\t%s = %s\r\n' % (v, v))
f.write('\tEndGlobalSection\r\n')
# Sort config guids for easier diffing of solution changes.
config_guids = []
config_guids_overrides = {}
for e in all_entries:
if isinstance(e, MSVSProject):
config_guids.append(e.get_guid())
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
config_guids.sort()
f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
for g in config_guids:
for v in self.variants:
nv = config_guids_overrides[g].get(v, v)
# Pick which project configuration to build for this solution
# configuration.
f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
# Enable project in this solution configuration.
f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
f.write('\tEndGlobalSection\r\n')
# TODO(rspangler): Should be able to configure this stuff too (though I've
# never seen this be any different)
f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
f.write('\t\tHideSolutionNode = FALSE\r\n')
f.write('\tEndGlobalSection\r\n')
# Folder mappings
# Omit this section if there are no folders
if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
for e in all_entries:
if not isinstance(e, MSVSFolder):
continue # Does not apply to projects, only folders
for subentry in e.entries:
f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
f.write('\tEndGlobalSection\r\n')
f.write('EndGlobal\r\n')
f.close()
| gpl-3.0 |
jamesblunt/edx-platform | lms/djangoapps/branding/migrations/0002_auto__add_brandingapiconfig.py | 84 | 5295 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'BrandingApiConfig'
db.create_table('branding_brandingapiconfig', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('branding', ['BrandingApiConfig'])
def backwards(self, orm):
# Deleting model 'BrandingApiConfig'
db.delete_table('branding_brandingapiconfig')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'branding.brandingapiconfig': {
'Meta': {'object_name': 'BrandingApiConfig'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'branding.brandinginfoconfig': {
'Meta': {'object_name': 'BrandingInfoConfig'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'configuration': ('django.db.models.fields.TextField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['branding']
| agpl-3.0 |
meredith-digops/ansible | lib/ansible/modules/utilities/helper/meta.py | 27 | 3847 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Ansible, a Red Hat company
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
module: meta
short_description: Execute Ansible 'actions'
version_added: "1.2"
description:
- Meta tasks are a special kind of task which can influence Ansible internal execution or state. Prior to Ansible 2.0,
      the only meta option available was `flush_handlers`. As of 2.2, there are several meta tasks which can be used.
Meta tasks can be used anywhere within your playbook.
options:
free_form:
description:
- This module takes a free form command, as a string. There's not an actual option named "free form". See the examples!
- >
C(flush_handlers) makes Ansible run any handler tasks which have thus far been notified. Ansible inserts these tasks internally at certain
points to implicitly trigger handler runs (after pre/post tasks, the final role execution, and the main tasks section of your plays).
- >
C(refresh_inventory) (added in 2.0) forces the reload of the inventory, which in the case of dynamic inventory scripts means they will be
      re-executed. This is mainly useful when additional hosts are created and users wish to use them instead of using the `add_host` module.
- "C(noop) (added in 2.0) This literally does 'nothing'. It is mainly used internally and not recommended for general use."
- "C(clear_facts) (added in 2.1) causes the gathered facts for the hosts specified in the play's list of hosts to be cleared, including the fact cache."
- "C(clear_host_errors) (added in 2.1) clears the failed state (if any) from hosts specified in the play's list of hosts."
- "C(end_play) (added in 2.2) causes the play to end without failing the host."
- "C(reset_connection) (added in 2.3) interrupts a persistent connection (i.e. ssh + control persist)"
    choices: ['noop', 'flush_handlers', 'refresh_inventory', 'clear_facts', 'clear_host_errors', 'end_play', 'reset_connection']
required: true
default: null
notes:
  - meta is not really a module nor an action_plugin; as such it cannot be overwritten.
author:
- "Ansible Core Team"
'''
EXAMPLES = '''
- template:
src: new.j2
dest: /etc/config.txt
notify: myhandler
- name: force all notified handlers to run at this point, not waiting for normal sync points
meta: flush_handlers
- name: reload inventory, useful with dynamic inventories when play makes changes to the existing hosts
cloud_guest: # this is fake module
name: newhost
state: present
- name: Refresh inventory to ensure new instances exist in inventory
meta: refresh_inventory
- name: Clear gathered facts from all currently targeted hosts
meta: clear_facts
- name: bring host back to play after failure
copy:
src: file
dest: /etc/file
remote_user: imightnothavepermission
- meta: clear_host_errors
- user: name={{ansible_user}} groups=input
- name: reset ssh connection to allow user changes to affect 'current login user'
meta: reset_connection
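# Illustrative snippet for a documented choice not shown above; this is a
# hypothetical addition, not part of the original module examples.
- name: end the play for the current batch of hosts without failing them
  meta: end_play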
'''
| gpl-3.0 |
phuihock/birtconn | addons/report_birt/wizard/report_birt.py | 2 | 13419 | # -*- encoding: utf-8 -*-
from collections import OrderedDict
from datetime import datetime, date, time
from lxml import etree
from openerp import netsvc, tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.report.interface import report_int
from openerp.tools.translate import _
import openerp.pooler as pooler
import os
import re
import requests
import simplejson as json
def get_report_api(cr, uid, pool, context=None):
ir_config_parameter = pool.get("ir.config_parameter")
api = ir_config_parameter.get_param(cr, uid, "birtconn.api", context=context)
if not api:
# fallback, look in to config file
api = tools.config.get_misc('birtconn', 'api')
if not api:
raise ValueError("System property 'birtconn.api' is not defined.")
return os.path.join(api, 'report')
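# Hedged illustration of the two lookup sources above (the URL value is made
# up): either a system parameter named 'birtconn.api' set via
# ir.config_parameter, or a [birtconn] section in the OpenERP server config:
#   [birtconn]
#   api = http://localhost:8080/birtconn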
def serialize(obj):
if isinstance(obj, datetime):
return datetime.strftime(obj, tools.DEFAULT_SERVER_DATETIME_FORMAT)
if isinstance(obj, date):
return datetime.strftime(obj, tools.DEFAULT_SERVER_DATE_FORMAT)
if isinstance(obj, time):
return datetime.strftime(obj, tools.DEFAULT_SERVER_TIME_FORMAT)
raise TypeError(obj)
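# Minimal usage sketch for serialize() (assumed from its signature, not part
# of the original file): json.dumps falls back to it for date/time objects.
#   json.dumps({'when': datetime.now()}, default=serialize)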
class report_birt_report_wizard(osv.osv_memory):
_name = 'report_birt.report_wizard'
_description = 'BIRT Report Wizard'
_columns = {
'__report_name': fields.char(size=64, string="Report Name"),
'__values': fields.text(string="Values"),
}
def _report_get(self, cr, uid, context=None):
if 'report_name' in context:
report_obj = self.pool.get('ir.actions.report.xml')
found = report_obj.search(cr, uid, [('report_name', '=', context['report_name'])])
if found:
report_id = found[0]
report = report_obj.read(cr, uid, report_id, ['report_file'])
report_api = get_report_api(cr, uid, self.pool, context)
r = requests.get('%s?report_file=%s' % (report_api, report['report_file']))
return r.json()
return {}
def default_get_recursively(self, cr, uid, fields_list, parameters, context=None):
res = {}
for param in parameters:
name = param['name']
if name.startswith('__'):
key = name[2:]
if key in context:
res[name] = context[key]
else:
ptype = param['type'].split('/')
if ptype[0] == "scalar":
fieldType = param['fieldType']
if fieldType == 'time':
# 'time' is not supported by OpenERP by default, but datetime is. So, we treat time as datetime field
# and use our custom timepicker widget.
fieldType = 'datetime'
if fieldType == 'char' and ptype[1] == 'multi-value':
val = param['defaultValue']
elif fieldType in ['boolean']:
                        # unfortunately, the boolean field setter converts False to 'False',
                        # a truthy value, so we override it with the raw value which is in
                        # the correct type.
val = param['defaultValue']
else:
conv = getattr(fields, fieldType)(string=param['promptText'])._symbol_set[1]
v1 = param['defaultValue']
if isinstance(v1, (list, tuple)):
val = [conv(v) for v in v1]
else:
val = conv(v1)
res[name] = val
elif ptype[0] == "group":
parameters_g = param['parameters']
res_g = self.default_get_recursively(cr, uid, fields_list, parameters_g, context)
res.update(res_g)
return res
def default_get(self, cr, uid, fields_list, context=None):
res = {}
parameters = self._report_get(cr, uid, context)
return self.default_get_recursively(cr, uid, fields_list, parameters, context)
def fields_get_meta(self, cr, uid, param, context, res, fgroup=None):
name = param['name']
meta = {}
ptype = param['type'].split('/')
if ptype[0] == 'group':
for p in param['parameters']:
self.fields_get_meta(cr, uid, p, context, res, {'name': param['name'], 'string': param['promptText']})
if ptype[0] == 'scalar':
meta['type'] = param['fieldType']
meta['context'] = {
'scalar': ptype[1]
}
# refer to field_to_dict if you don't understand why
if 'selection' in param and param['selection']:
meta['selection'] = param['selection']
meta['type'] = 'selection' # override default input type
meta['string'] = param['promptText']
meta['required'] = param['required']
meta['invisible'] = param['hidden']
meta['help'] = param['helpText']
meta['context']['type'] = param['fieldType'] # actual data type
if fgroup:
meta['context']['fgroup'] = fgroup
res[name] = meta
def fields_get(self, cr, uid, fields_list=None, context=None, write_access=True):
context = context or {}
res = super(report_birt_report_wizard, self).fields_get(cr, uid, fields_list, context, write_access)
for f in self._columns:
for attr in ['invisible', 'readonly']:
res[f][attr] = True
parameters = self._report_get(cr, uid, context)
for param in parameters:
self.fields_get_meta(cr, uid, param, context, res)
def order_by(parameters):
def _wrap(name):
for i, param in enumerate(parameters):
ptype = param['type'].split('/')
if ptype[0] == 'group':
j = order_by(param['parameters'])(name)
if j != -1:
return (i * 10) + j
elif ptype[0] == 'scalar' and param['name'] == name:
return (i * 10)
return -1
return _wrap
if parameters:
res_cur = res
res_new = OrderedDict()
            # What we are trying to achieve here is to order fields by the order they
            # (parameters) are defined in the rptdesign. Because a parameter may be a
            # group parameter, we also inspect each group by looping over its contents.
for k in sorted(res, cmp, key=order_by(parameters)):
res_new[k] = res_cur[k]
res = res_new
# if no fields is specified, return all
if not fields_list:
return res
else:
# return only what is requested
            return dict(filter(lambda (k, v): k in fields_list, res.items()))
def create(self, cr, uid, vals, context=None):
# 'vals' contains all field/value pairs, including report_name, parameters and values.
# But we don't have all the columns since they are dynamically generated
# based on report's parameters.
values = dict(filter(lambda (x, y): x not in ['__report_name', '__values'], vals.items()))
values = json.dumps(values)
report_name = context['report_name']
vals = {
'__report_name': report_name,
'__values': values,
}
return super(report_birt_report_wizard, self).create(cr, uid, vals, context)
def _get_lang_dict(self, cr, uid, context):
lang_obj = self.pool.get('res.lang')
lang = context and context.get('lang', 'en_US') or 'en_US'
lang_ids = lang_obj.search(cr, uid, [('code','=',lang)])
if not lang_ids:
lang_ids = lang_obj.search(cr, uid, [('code','=','en_US')])
lang_obj = lang_obj.browse(cr, uid, lang_ids[0])
return {'date_format': lang_obj.date_format, 'time_format': lang_obj.time_format}
def print_report(self, cr, uid, ids, context=None):
lang_dict = self._get_lang_dict(cr, uid, context)
values = self.read(cr, uid, ids[0], ['__values'], context=context)['__values']
values = json.loads(values)
def conv(v1):
# cast value to the correct type with OpenERP's internal setter
v2 = getattr(fields, t1)(**descriptor)._symbol_set[1](v1)
            # but sometimes we have to override the value with a different type because
            # OpenERP differs slightly from, or does not support, the param type BIRT expects.
if t1 == 'char' and s1 == 'multi-value':
# our multivalue checkbox widget that sends values in an array
if not isinstance(v1, (list, tuple)):
v1 = [v1]
v2 = v1
elif t1 == 'time':
# NOTE: time is represented as datetime field with custom timepicker widget
time_format = lang_dict['time_format']
cc = time_format.count(':') + 1
v2 = datetime.strptime(':'.join(v1.split(':')[:cc]), time_format)
                v2 = getattr(fields, 'datetime')(**descriptor)._symbol_set[1](v2)
return v2
report_name = context['report_name']
fg = self.fields_get(cr, uid, context={'report_name': report_name})
for (name, descriptor) in fg.items():
if name not in self._columns:
s1 = descriptor['context']['scalar']
t1 = descriptor['context']['type']
v1 = values[name]
if isinstance(v1, (list, tuple)):
v2 = [conv(v) for v in v1]
else:
v2 = conv(v1)
values[name] = v2
return {
'type': 'ir.actions.report.xml',
'report_name': context['report_name'],
'datas': values,
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(report_birt_report_wizard, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar, submenu)
if view_type == 'search':
return res
xarch = etree.XML(res['arch'])
group_values = xarch.xpath('//group[@name="__values"]')[0]
for field, descriptor in self.fields_get(cr, uid, context=context).iteritems():
if 'context' in descriptor and 'fgroup' in descriptor['context']:
fgroup = descriptor['context']['fgroup']
elg = xarch.xpath('//group[@name="%(name)s"][@string="%(string)s"]' % fgroup)
if elg:
_g = elg[0]
else:
_g = etree.SubElement(group_values, 'group', name=fgroup['name'], string=fgroup['string'], colspan="2")
el = etree.SubElement(_g, 'field', name=field)
else:
el = etree.SubElement(group_values, 'field', name=field)
if field == 'time':
# use custom timepicker widget
el.set('widget', 'timepicker')
if 'multi-value' == descriptor.get('context', {}).get('scalar'):
# use custom multiselect widget
el.set('widget', 'multiselect')
xarch, xfields = self._view_look_dom_arch(cr, uid, xarch, view_id, context=context)
res['fields'] = xfields
res['arch'] = xarch
return res
report_birt_report_wizard()
class report_birt(report_int):
def __init__(self, name, table, reportxml_id):
super(report_birt, self).__init__(name)
self.table = table
self.reportxml_id = reportxml_id
def create(self, cr, uid, ids, vals, context):
pool = pooler.get_pool(cr.dbname)
pool_reportxml = pool.get('ir.actions.report.xml')
reportxml = pool_reportxml.browse(cr, uid, self.reportxml_id)
headers = {'Content-type': 'application/json', 'Accept': 'application/octet-stream'}
for k, v in context.items():
akey = '__%s' % k
if akey not in vals:
vals[akey] = v
data = {
'reportFile': reportxml.report_file,
'__values': vals,
}
report_api = get_report_api(cr, uid, pool, context)
req = requests.post(report_api, data=json.dumps(data, default=serialize), headers=headers)
ext = re.search(r'filename=.+\.(.+);?', req.headers.get('content-disposition')).group(1)
return (req.content, ext)
class registry(osv.osv):
_inherit = 'ir.actions.report.xml'
def register(self, cr, uid, ids, context=None):
svccls = netsvc.Service
if ids:
cr.execute("SELECT id, model, report_name FROM ir_act_report_xml WHERE id in (%s)" % (', '.join([str(id) for id in ids]),))
result = cr.dictfetchall()
for r in result:
name = 'report.' + r['report_name']
svccls.remove(name)
report_birt(name, r['model'], r['id'])
def register_all(self, cr):
cr.execute("SELECT id FROM ir_act_report_xml WHERE report_type = 'birt' ORDER BY id")
ids = [row['id'] for row in cr.dictfetchall()]
self.register(cr, SUPERUSER_ID, ids)
registry()
| agpl-3.0 |
eaas-framework/virtualbox | src/VBox/GuestHost/OpenGL/spu_loader/spucopy.py | 22 | 1634 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
import apiutil
apiutil.CopyrightC()
print """
/* DO NOT EDIT - THIS FILE AUTOMATICALLY GENERATED BY spucopy.py SCRIPT */
#include "cr_spu.h"
#include "cr_mem.h"
void crSPUCopyDispatchTable( SPUDispatchTable *dst, SPUDispatchTable *src )
{
"""
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
for func_name in keys:
print '\tdst->%s = src->%s;' % (func_name, func_name)
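# Hedged illustration of the lines the loop above emits (actual function
# names come from APIspec.txt, so these two are hypothetical):
#   dst->Begin = src->Begin;
#   dst->Vertex3f = src->Vertex3f;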
# if the destination is already a copy of something, we'd better make sure
# that we take it off its source's copy list first.
print """
if (dst->copy_of != NULL)
{
/*
* dst was already a copy, go back to the original,
* and remove dst from the original's copyList.
*/
struct _copy_list_node *temp, *prior = NULL;
for (temp = dst->copy_of->copyList; temp; prior = temp, temp = temp->next)
{
if (temp->copy == dst)
{
if (prior)
{
prior->next = temp->next;
}
else
{
dst->copy_of->copyList = temp->next;
}
crFree( temp );
break;
}
}
}
/*
* Now that dst->copy_of is unused, set it to point to our
* new original.
*/
if (src->copy_of)
{
dst->copy_of = src->copy_of;
}
else
{
dst->copy_of = src;
}
/*
* Create a new copy node, so the src can keep track of the
* new copy (i.e. dst).
*/
{
struct _copy_list_node *copynode;
copynode = (struct _copy_list_node*)crAlloc( sizeof( *copynode ) );
copynode->copy = dst;
copynode->next = src->copyList;
src->copyList = copynode;
}
}
"""
| gpl-2.0 |
sdcooke/django | django/utils/dateformat.py | 365 | 10712 | """
PHP date() style date formatting
See http://www.php.net/date for format strings
Usage:
>>> import datetime
>>> d = datetime.datetime.now()
>>> df = DateFormat(d)
>>> print(df.format('jS F Y H:i'))
7th October 2003 11:39
>>>
"""
from __future__ import unicode_literals
import calendar
import datetime
import re
import time
from django.utils import six
from django.utils.dates import (
MONTHS, MONTHS_3, MONTHS_ALT, MONTHS_AP, WEEKDAYS, WEEKDAYS_ABBR,
)
from django.utils.encoding import force_text
from django.utils.timezone import get_default_timezone, is_aware, is_naive
from django.utils.translation import ugettext as _
re_formatchars = re.compile(r'(?<!\\)([aAbBcdDeEfFgGhHiIjlLmMnNoOPrsStTUuwWyYzZ])')
re_escaped = re.compile(r'\\(.)')
class Formatter(object):
def format(self, formatstr):
pieces = []
for i, piece in enumerate(re_formatchars.split(force_text(formatstr))):
if i % 2:
pieces.append(force_text(getattr(self, piece)()))
elif piece:
pieces.append(re_escaped.sub(r'\1', piece))
return ''.join(pieces)
class TimeFormat(Formatter):
def __init__(self, obj):
self.data = obj
self.timezone = None
# We only support timezone when formatting datetime objects,
# not date objects (timezone information not appropriate),
# or time objects (against established django policy).
if isinstance(obj, datetime.datetime):
if is_naive(obj):
self.timezone = get_default_timezone()
else:
self.timezone = obj.tzinfo
def a(self):
"'a.m.' or 'p.m.'"
if self.data.hour > 11:
return _('p.m.')
return _('a.m.')
def A(self):
"'AM' or 'PM'"
if self.data.hour > 11:
return _('PM')
return _('AM')
def B(self):
"Swatch Internet time"
raise NotImplementedError('may be implemented in a future release')
def e(self):
"""
Timezone name.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
try:
if hasattr(self.data, 'tzinfo') and self.data.tzinfo:
# Have to use tzinfo.tzname and not datetime.tzname
                # because datetime.tzname does not expect Unicode
return self.data.tzinfo.tzname(self.data) or ""
except NotImplementedError:
pass
return ""
def f(self):
"""
Time, in 12-hour hours and minutes, with minutes left off if they're
zero.
Examples: '1', '1:30', '2:05', '2'
Proprietary extension.
"""
if self.data.minute == 0:
return self.g()
return '%s:%s' % (self.g(), self.i())
def g(self):
"Hour, 12-hour format without leading zeros; i.e. '1' to '12'"
if self.data.hour == 0:
return 12
if self.data.hour > 12:
return self.data.hour - 12
return self.data.hour
def G(self):
"Hour, 24-hour format without leading zeros; i.e. '0' to '23'"
return self.data.hour
def h(self):
"Hour, 12-hour format; i.e. '01' to '12'"
return '%02d' % self.g()
def H(self):
"Hour, 24-hour format; i.e. '00' to '23'"
return '%02d' % self.G()
def i(self):
"Minutes; i.e. '00' to '59'"
return '%02d' % self.data.minute
def O(self):
"""
Difference to Greenwich time in hours; e.g. '+0200', '-0430'.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
seconds = self.Z()
sign = '-' if seconds < 0 else '+'
seconds = abs(seconds)
return "%s%02d%02d" % (sign, seconds // 3600, (seconds // 60) % 60)
def P(self):
"""
Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off
if they're zero and the strings 'midnight' and 'noon' if appropriate.
Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'
Proprietary extension.
"""
if self.data.minute == 0 and self.data.hour == 0:
return _('midnight')
if self.data.minute == 0 and self.data.hour == 12:
return _('noon')
return '%s %s' % (self.f(), self.a())
def s(self):
"Seconds; i.e. '00' to '59'"
return '%02d' % self.data.second
def T(self):
"""
Time zone of this machine; e.g. 'EST' or 'MDT'.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
name = self.timezone.tzname(self.data) if self.timezone else None
if name is None:
name = self.format('O')
return six.text_type(name)
def u(self):
"Microseconds; i.e. '000000' to '999999'"
return '%06d' % self.data.microsecond
def Z(self):
"""
Time zone offset in seconds (i.e. '-43200' to '43200'). The offset for
timezones west of UTC is always negative, and for those east of UTC is
always positive.
If timezone information is not available, this method returns
an empty string.
"""
if not self.timezone:
return ""
offset = self.timezone.utcoffset(self.data)
# `offset` is a datetime.timedelta. For negative values (to the west of
# UTC) only days can be negative (days=-1) and seconds are always
# positive. e.g. UTC-1 -> timedelta(days=-1, seconds=82800, microseconds=0)
# Positive offsets have days=0
return offset.days * 86400 + offset.seconds
class DateFormat(TimeFormat):
year_days = [None, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]
def b(self):
"Month, textual, 3 letters, lowercase; e.g. 'jan'"
return MONTHS_3[self.data.month]
def c(self):
"""
ISO 8601 Format
Example : '2008-01-02T10:30:00.000123'
"""
return self.data.isoformat()
def d(self):
"Day of the month, 2 digits with leading zeros; i.e. '01' to '31'"
return '%02d' % self.data.day
def D(self):
"Day of the week, textual, 3 letters; e.g. 'Fri'"
return WEEKDAYS_ABBR[self.data.weekday()]
def E(self):
"Alternative month names as required by some locales. Proprietary extension."
return MONTHS_ALT[self.data.month]
def F(self):
"Month, textual, long; e.g. 'January'"
return MONTHS[self.data.month]
def I(self):
"'1' if Daylight Savings Time, '0' otherwise."
if self.timezone and self.timezone.dst(self.data):
return '1'
else:
return '0'
def j(self):
"Day of the month without leading zeros; i.e. '1' to '31'"
return self.data.day
def l(self):
"Day of the week, textual, long; e.g. 'Friday'"
return WEEKDAYS[self.data.weekday()]
def L(self):
"Boolean for whether it is a leap year; i.e. True or False"
return calendar.isleap(self.data.year)
def m(self):
"Month; i.e. '01' to '12'"
return '%02d' % self.data.month
def M(self):
"Month, textual, 3 letters; e.g. 'Jan'"
return MONTHS_3[self.data.month].title()
def n(self):
"Month without leading zeros; i.e. '1' to '12'"
return self.data.month
def N(self):
"Month abbreviation in Associated Press style. Proprietary extension."
return MONTHS_AP[self.data.month]
def o(self):
"ISO 8601 year number matching the ISO week number (W)"
return self.data.isocalendar()[0]
def r(self):
"RFC 2822 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'"
return self.format('D, j M Y H:i:s O')
def S(self):
"English ordinal suffix for the day of the month, 2 characters; i.e. 'st', 'nd', 'rd' or 'th'"
if self.data.day in (11, 12, 13): # Special case
return 'th'
last = self.data.day % 10
if last == 1:
return 'st'
if last == 2:
return 'nd'
if last == 3:
return 'rd'
return 'th'
def t(self):
"Number of days in the given month; i.e. '28' to '31'"
return '%02d' % calendar.monthrange(self.data.year, self.data.month)[1]
def U(self):
"Seconds since the Unix epoch (January 1 1970 00:00:00 GMT)"
if isinstance(self.data, datetime.datetime) and is_aware(self.data):
return int(calendar.timegm(self.data.utctimetuple()))
else:
return int(time.mktime(self.data.timetuple()))
def w(self):
"Day of the week, numeric, i.e. '0' (Sunday) to '6' (Saturday)"
return (self.data.weekday() + 1) % 7
def W(self):
"ISO-8601 week number of year, weeks starting on Monday"
# Algorithm from http://www.personal.ecu.edu/mccartyr/ISOwdALG.txt
week_number = None
jan1_weekday = self.data.replace(month=1, day=1).weekday() + 1
weekday = self.data.weekday() + 1
day_of_year = self.z()
if day_of_year <= (8 - jan1_weekday) and jan1_weekday > 4:
if jan1_weekday == 5 or (jan1_weekday == 6 and calendar.isleap(self.data.year - 1)):
week_number = 53
else:
week_number = 52
else:
if calendar.isleap(self.data.year):
i = 366
else:
i = 365
if (i - day_of_year) < (4 - weekday):
week_number = 1
else:
j = day_of_year + (7 - weekday) + (jan1_weekday - 1)
week_number = j // 7
if jan1_weekday > 4:
week_number -= 1
return week_number
def y(self):
"Year, 2 digits; e.g. '99'"
return six.text_type(self.data.year)[2:]
def Y(self):
"Year, 4 digits; e.g. '1999'"
return self.data.year
def z(self):
"Day of the year; i.e. '0' to '365'"
doy = self.year_days[self.data.month] + self.data.day
if self.L() and self.data.month > 2:
doy += 1
return doy
def format(value, format_string):
"Convenience function"
df = DateFormat(value)
return df.format(format_string)
def time_format(value, format_string):
"Convenience function"
tf = TimeFormat(value)
return tf.format(format_string)
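# Hedged usage sketch of the convenience functions above (mirrors the module
# docstring; exact output depends on the active locale/translations):
#   format(datetime.datetime(2003, 10, 7, 11, 39), 'jS F Y H:i')
#   # -> '7th October 2003 11:39'
#   time_format(datetime.time(11, 39), 'H:i')  # -> '11:39'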
| bsd-3-clause |
Midrya/chromium | rietveld.py | 1 | 26054 | # coding: utf-8
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Defines class Rietveld to easily access a rietveld instance.
Security implications:
The following hypotheses are made:
  - Rietveld enforces:
    - Nobody other than the issue owner can upload a patch set
    - The issue owner's credentials are verified when creating new issues
    - An issue's owner can't change once the issue is created
- A patch set cannot be modified
"""
import copy
import errno
import json
import logging
import re
import socket
import ssl
import sys
import time
import urllib
import urllib2
import urlparse
import patch
from third_party import upload
import third_party.oauth2client.client as oa2client
from third_party import httplib2
# Appengine replies with 302 when authentication fails (sigh.)
oa2client.REFRESH_STATUS_CODES.append(302)
upload.LOGGER.setLevel(logging.WARNING) # pylint: disable=E1103
class Rietveld(object):
"""Accesses rietveld."""
def __init__(
self, url, auth_config, email=None, extra_headers=None, maxtries=None):
self.url = url.rstrip('/')
self.rpc_server = upload.GetRpcServer(self.url, auth_config, email)
self._xsrf_token = None
self._xsrf_token_time = None
self._maxtries = maxtries or 40
def xsrf_token(self):
if (not self._xsrf_token_time or
(time.time() - self._xsrf_token_time) > 30*60):
self._xsrf_token_time = time.time()
self._xsrf_token = self.get(
'/xsrf_token',
extra_headers={'X-Requesting-XSRF-Token': '1'})
return self._xsrf_token
def get_pending_issues(self):
"""Returns an array of dict of all the pending issues on the server."""
# TODO: Convert this to use Rietveld::search(), defined below.
return json.loads(
self.get('/search?format=json&commit=2&closed=3&'
'keys_only=True&limit=1000&order=__key__'))['results']
def close_issue(self, issue):
"""Closes the Rietveld issue for this changelist."""
logging.info('closing issue %d' % issue)
self.post("/%d/close" % issue, [('xsrf_token', self.xsrf_token())])
def get_description(self, issue):
"""Returns the issue's description.
Converts any CRLF into LF and strip extraneous whitespace.
"""
return '\n'.join(self.get('/%d/description' % issue).strip().splitlines())
def get_issue_properties(self, issue, messages):
"""Returns all the issue's metadata as a dictionary."""
url = '/api/%d' % issue
if messages:
url += '?messages=true'
data = json.loads(self.get(url, retry_on_404=True))
data['description'] = '\n'.join(data['description'].strip().splitlines())
return data
def get_depends_on_patchset(self, issue, patchset):
"""Returns the patchset this patchset depends on if it exists."""
url = '/%d/patchset/%d/get_depends_on_patchset' % (issue, patchset)
resp = None
try:
resp = json.loads(self.post(url, []))
except (urllib2.HTTPError, ValueError):
# The get_depends_on_patchset endpoint does not exist on this Rietveld
# instance yet. Ignore the error and proceed.
# TODO(rmistry): Make this an error when all Rietveld instances have
# this endpoint.
pass
return resp
def get_patchset_properties(self, issue, patchset):
"""Returns the patchset properties."""
url = '/api/%d/%d' % (issue, patchset)
return json.loads(self.get(url))
def get_file_content(self, issue, patchset, item):
"""Returns the content of a new file.
Throws HTTP 302 exception if the file doesn't exist or is not a binary file.
"""
# content = 0 is the old file, 1 is the new file.
content = 1
url = '/%d/binary/%d/%d/%d' % (issue, patchset, item, content)
return self.get(url)
def get_file_diff(self, issue, patchset, item):
"""Returns the diff of the file.
Returns a useless diff for binary files.
"""
url = '/download/issue%d_%d_%d.diff' % (issue, patchset, item)
return self.get(url)
def get_patch(self, issue, patchset):
"""Returns a PatchSet object containing the details to apply this patch."""
props = self.get_patchset_properties(issue, patchset) or {}
out = []
for filename, state in props.get('files', {}).iteritems():
logging.debug('%s' % filename)
# If not status, just assume it's a 'M'. Rietveld often gets it wrong and
# just has status: null. Oh well.
status = state.get('status') or 'M'
if status[0] not in ('A', 'D', 'M', 'R'):
raise patch.UnsupportedPatchFormat(
filename, 'Change with status \'%s\' is not supported.' % status)
svn_props = self.parse_svn_properties(
state.get('property_changes', ''), filename)
if state.get('is_binary'):
if status[0] == 'D':
if status[0] != status.strip():
raise patch.UnsupportedPatchFormat(
filename, 'Deleted file shouldn\'t have property change.')
out.append(patch.FilePatchDelete(filename, state['is_binary']))
else:
content = self.get_file_content(issue, patchset, state['id'])
if not content:
# As a precaution due to a bug in upload.py for git checkout, refuse
# empty files. If it's empty, it's not a binary file.
raise patch.UnsupportedPatchFormat(
filename,
'Binary file is empty. Maybe the file wasn\'t uploaded in the '
'first place?')
out.append(patch.FilePatchBinary(
filename,
content,
svn_props,
is_new=(status[0] == 'A')))
continue
try:
diff = self.get_file_diff(issue, patchset, state['id'])
except urllib2.HTTPError, e:
if e.code == 404:
raise patch.UnsupportedPatchFormat(
filename, 'File doesn\'t have a diff.')
raise
# FilePatchDiff() will detect file deletion automatically.
p = patch.FilePatchDiff(filename, diff, svn_props)
out.append(p)
if status[0] == 'A':
# It won't be set for empty file.
p.is_new = True
if (len(status) > 1 and
status[1] == '+' and
not (p.source_filename or p.svn_properties)):
raise patch.UnsupportedPatchFormat(
filename, 'Failed to process the svn properties')
return patch.PatchSet(out)
@staticmethod
def parse_svn_properties(rietveld_svn_props, filename):
"""Returns a list of tuple [('property', 'newvalue')].
rietveld_svn_props is the exact format from 'svn diff'.
"""
rietveld_svn_props = rietveld_svn_props.splitlines()
svn_props = []
if not rietveld_svn_props:
return svn_props
# 1. Ignore svn:mergeinfo.
# 2. Accept svn:eol-style and svn:executable.
# 3. Refuse any other.
# \n
# Added: svn:ignore\n
# + LF\n
spacer = rietveld_svn_props.pop(0)
if spacer or not rietveld_svn_props:
# svn diff always put a spacer between the unified diff and property
# diff
raise patch.UnsupportedPatchFormat(
filename, 'Failed to parse svn properties.')
while rietveld_svn_props:
# Something like 'Added: svn:eol-style'. Note the action is localized.
# *sigh*.
action = rietveld_svn_props.pop(0)
match = re.match(r'^(\w+): (.+)$', action)
if not match or not rietveld_svn_props:
raise patch.UnsupportedPatchFormat(
filename,
'Failed to parse svn properties: %s, %s' % (action, svn_props))
if match.group(2) == 'svn:mergeinfo':
# Silently ignore the content.
rietveld_svn_props.pop(0)
continue
if match.group(1) not in ('Added', 'Modified'):
# Will fail for our French friends.
raise patch.UnsupportedPatchFormat(
filename, 'Unsupported svn property operation.')
if match.group(2) in ('svn:eol-style', 'svn:executable', 'svn:mime-type'):
# ' + foo' where foo is the new value. That's fragile.
content = rietveld_svn_props.pop(0)
match2 = re.match(r'^ \+ (.*)$', content)
if not match2:
raise patch.UnsupportedPatchFormat(
filename, 'Unsupported svn property format.')
svn_props.append((match.group(2), match2.group(1)))
return svn_props
def update_description(self, issue, description):
"""Sets the description for an issue on Rietveld."""
logging.info('new description for issue %d' % issue)
self.post('/%d/description' % issue, [
('description', description),
('xsrf_token', self.xsrf_token())])
def add_comment(self, issue, message, add_as_reviewer=False):
max_message = 10000
    tail = '…\n(message too large)'
if len(message) > max_message:
message = message[:max_message-len(tail)] + tail
logging.info('issue %d; comment: %s' % (issue, message.strip()[:300]))
return self.post('/%d/publish' % issue, [
('xsrf_token', self.xsrf_token()),
('message', message),
('message_only', 'True'),
('add_as_reviewer', str(bool(add_as_reviewer))),
('send_mail', 'True'),
('no_redirect', 'True')])
def add_inline_comment(
self, issue, text, side, snapshot, patchset, patchid, lineno):
logging.info('add inline comment for issue %d' % issue)
return self.post('/inline_draft', [
('issue', str(issue)),
('text', text),
('side', side),
('snapshot', snapshot),
('patchset', str(patchset)),
('patch', str(patchid)),
('lineno', str(lineno))])
def set_flag(self, issue, patchset, flag, value):
return self.post('/%d/edit_flags' % issue, [
('last_patchset', str(patchset)),
('xsrf_token', self.xsrf_token()),
(flag, str(value))])
def search(
self,
owner=None, reviewer=None,
base=None,
closed=None, private=None, commit=None,
created_before=None, created_after=None,
modified_before=None, modified_after=None,
per_request=None, keys_only=False,
with_messages=False):
"""Yields search results."""
# These are expected to be strings.
string_keys = {
'owner': owner,
'reviewer': reviewer,
'base': base,
'created_before': created_before,
'created_after': created_after,
'modified_before': modified_before,
'modified_after': modified_after,
}
# These are either None, False or True.
three_state_keys = {
'closed': closed,
'private': private,
'commit': commit,
}
url = '/search?format=json'
# Sort the keys mainly to ease testing.
for key in sorted(string_keys):
value = string_keys[key]
if value:
url += '&%s=%s' % (key, urllib2.quote(value))
for key in sorted(three_state_keys):
value = three_state_keys[key]
if value is not None:
url += '&%s=%d' % (key, int(value) + 1)
if keys_only:
url += '&keys_only=True'
if with_messages:
url += '&with_messages=True'
if per_request:
url += '&limit=%d' % per_request
cursor = ''
while True:
output = self.get(url + cursor)
if output.startswith('<'):
# It's an error message. Return as no result.
break
data = json.loads(output) or {}
if not data.get('results'):
break
for i in data['results']:
yield i
cursor = '&cursor=%s' % data['cursor']
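  # Hedged usage sketch of search() (not in the original file); each yielded
  # dict has whatever shape Rietveld's /search JSON endpoint returns:
  #   for result in rietveld.search(owner='user@example.com', closed=False):
  #       process(result)   # process() is a hypothetical consumer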
def trigger_try_jobs(
self, issue, patchset, reason, clobber, revision, builders_and_tests,
master=None, category='cq'):
"""Requests new try jobs.
|builders_and_tests| is a map of builders: [tests] to run.
|master| is the name of the try master the builders belong to.
|category| is used to distinguish regular jobs and experimental jobs.
    Returns the keys of the new TryJobResult entities.
"""
params = [
('reason', reason),
('clobber', 'True' if clobber else 'False'),
('builders', json.dumps(builders_and_tests)),
('xsrf_token', self.xsrf_token()),
('category', category),
]
if revision:
params.append(('revision', revision))
if master:
# Temporarily allow empty master names for old configurations. The try
# job will not be associated with a master name on rietveld. This is
# going to be deprecated.
params.append(('master', master))
return self.post('/%d/try/%d' % (issue, patchset), params)
def trigger_distributed_try_jobs(
self, issue, patchset, reason, clobber, revision, masters,
category='cq'):
"""Requests new try jobs.
|masters| is a map of masters: map of builders: [tests] to run.
|category| is used to distinguish regular jobs and experimental jobs.
"""
for (master, builders_and_tests) in masters.iteritems():
self.trigger_try_jobs(
issue, patchset, reason, clobber, revision, builders_and_tests,
master, category)
def get_pending_try_jobs(self, cursor=None, limit=100):
"""Retrieves the try job requests in pending state.
Returns a tuple of the list of try jobs and the cursor for the next request.
"""
url = '/get_pending_try_patchsets?limit=%d' % limit
extra = ('&cursor=' + cursor) if cursor else ''
data = json.loads(self.get(url + extra))
return data['jobs'], data['cursor']
def get(self, request_path, **kwargs):
kwargs.setdefault('payload', None)
return self._send(request_path, **kwargs)
def post(self, request_path, data, **kwargs):
ctype, body = upload.EncodeMultipartFormData(data, [])
return self._send(request_path, payload=body, content_type=ctype, **kwargs)
def _send(self, request_path, retry_on_404=False, **kwargs):
"""Sends a POST/GET to Rietveld. Returns the response body."""
# rpc_server.Send() assumes timeout=None by default; make sure it's set
# to something reasonable.
kwargs.setdefault('timeout', 15)
logging.debug('POSTing to %s, args %s.', request_path, kwargs)
try:
# Sadly, upload.py calls ErrorExit() which does a sys.exit(1) on HTTP
# 500 in AbstractRpcServer.Send().
old_error_exit = upload.ErrorExit
def trap_http_500(msg):
"""Converts an incorrect ErrorExit() call into a HTTPError exception."""
m = re.search(r'(50\d) Server Error', msg)
if m:
# Fake an HTTPError exception. Cheezy. :(
raise urllib2.HTTPError(
request_path, int(m.group(1)), msg, None, None)
old_error_exit(msg)
upload.ErrorExit = trap_http_500
for retry in xrange(self._maxtries):
try:
logging.debug('%s' % request_path)
result = self.rpc_server.Send(request_path, **kwargs)
# Sometimes GAE returns a HTTP 200 but with HTTP 500 as the content.
# How nice.
return result
except urllib2.HTTPError, e:
if retry >= (self._maxtries - 1):
raise
flake_codes = [500, 502, 503]
if retry_on_404:
flake_codes.append(404)
if e.code not in flake_codes:
raise
except urllib2.URLError, e:
if retry >= (self._maxtries - 1):
raise
          if ('Name or service not known' not in e.reason and
              'EOF occurred in violation of protocol' not in e.reason and
# On windows we hit weird bug http://crbug.com/537417
# with message '[Errno 10060] A connection attempt failed...'
not (sys.platform.startswith('win') and
isinstance(e.reason, socket.error) and
e.reason.errno == errno.ETIMEDOUT
)
):
# Usually internal GAE flakiness.
raise
except ssl.SSLError, e:
if retry >= (self._maxtries - 1):
raise
          if 'timed out' not in str(e):
raise
# If reaching this line, loop again. Uses a small backoff.
time.sleep(min(10, 1+retry*2))
except urllib2.HTTPError as e:
print 'Request to %s failed: %s' % (e.geturl(), e.read())
raise
finally:
upload.ErrorExit = old_error_exit
# DEPRECATED.
Send = get
class OAuthRpcServer(object):
def __init__(self,
host,
client_email,
client_private_key,
private_key_password='notasecret',
user_agent=None,
timeout=None,
extra_headers=None):
"""Wrapper around httplib2.Http() that handles authentication.
client_email: email associated with the service account
client_private_key: encrypted private key, as a string
private_key_password: password used to decrypt the private key
"""
# Enforce https
host_parts = urlparse.urlparse(host)
if host_parts.scheme == 'https': # fine
self.host = host
elif host_parts.scheme == 'http':
upload.logging.warning('Changing protocol to https')
self.host = 'https' + host[4:]
else:
msg = 'Invalid url provided: %s' % host
upload.logging.error(msg)
raise ValueError(msg)
self.host = self.host.rstrip('/')
self.extra_headers = extra_headers or {}
if not oa2client.HAS_OPENSSL:
logging.error("No support for OpenSSL has been found, "
"OAuth2 support requires it.")
logging.error("Installing pyopenssl will probably solve this issue.")
raise RuntimeError('No OpenSSL support')
self.creds = oa2client.SignedJwtAssertionCredentials(
client_email,
client_private_key,
'https://www.googleapis.com/auth/userinfo.email',
private_key_password=private_key_password,
user_agent=user_agent)
self._http = self.creds.authorize(httplib2.Http(timeout=timeout))
def Send(self,
request_path,
payload=None,
content_type='application/octet-stream',
timeout=None,
extra_headers=None,
**kwargs):
"""Send a POST or GET request to the server.
Args:
request_path: path on the server to hit. This is concatenated with the
value of 'host' provided to the constructor.
payload: request is a POST if not None, GET otherwise
timeout: in seconds
extra_headers: (dict)
"""
# This method signature should match upload.py:AbstractRpcServer.Send()
method = 'GET'
headers = self.extra_headers.copy()
headers.update(extra_headers or {})
if payload is not None:
method = 'POST'
headers['Content-Type'] = content_type
prev_timeout = self._http.timeout
try:
if timeout:
self._http.timeout = timeout
# TODO(pgervais) implement some kind of retry mechanism (see upload.py).
url = self.host + request_path
if kwargs:
url += "?" + urllib.urlencode(kwargs)
# This weird loop is there to detect when the OAuth2 token has expired.
# This is specific to appengine *and* rietveld. It relies on the
# assumption that a 302 is triggered only by an expired OAuth2 token. This
# prevents any usage of redirections in pages accessed this way.
# This variable is used to make sure the following loop runs only twice.
redirect_caught = False
while True:
try:
ret = self._http.request(url,
method=method,
body=payload,
headers=headers,
redirections=0)
except httplib2.RedirectLimit:
if redirect_caught or method != 'GET':
logging.error('Redirection detected after logging in. Giving up.')
raise
redirect_caught = True
logging.debug('Redirection detected. Trying to log in again...')
self.creds.access_token = None
continue
break
return ret[1]
finally:
self._http.timeout = prev_timeout
class JwtOAuth2Rietveld(Rietveld):
"""Access to Rietveld using OAuth authentication.
This class is supposed to be used only by bots, since this kind of
access is restricted to service accounts.
"""
  # The parent __init__ is not called on purpose.
# pylint: disable=W0231
def __init__(self,
url,
client_email,
client_private_key_file,
private_key_password=None,
extra_headers=None,
maxtries=None):
if private_key_password is None: # '' means 'empty password'
private_key_password = 'notasecret'
self.url = url.rstrip('/')
bot_url = self.url
if self.url.endswith('googleplex.com'):
bot_url = self.url + '/bots'
with open(client_private_key_file, 'rb') as f:
client_private_key = f.read()
logging.info('Using OAuth login: %s' % client_email)
self.rpc_server = OAuthRpcServer(bot_url,
client_email,
client_private_key,
private_key_password=private_key_password,
extra_headers=extra_headers or {})
self._xsrf_token = None
self._xsrf_token_time = None
self._maxtries = maxtries or 40
class CachingRietveld(Rietveld):
"""Caches the common queries.
Not to be used in long-standing processes, like the commit queue.
"""
def __init__(self, *args, **kwargs):
super(CachingRietveld, self).__init__(*args, **kwargs)
self._cache = {}
def _lookup(self, function_name, args, update):
"""Caches the return values corresponding to the arguments.
It is important that the arguments are standardized, like None vs False.
"""
function_cache = self._cache.setdefault(function_name, {})
if args not in function_cache:
function_cache[args] = update(*args)
return copy.deepcopy(function_cache[args])
def get_description(self, issue):
return self._lookup(
'get_description',
(issue,),
super(CachingRietveld, self).get_description)
def get_issue_properties(self, issue, messages):
"""Returns the issue properties.
Because in practice the presubmit checks often ask without messages first
and then with messages, always ask with messages and strip off if not asked
for the messages.
"""
# It's a tad slower to request with the message but it's better than
# requesting the properties twice.
data = self._lookup(
'get_issue_properties',
(issue, True),
super(CachingRietveld, self).get_issue_properties)
if not messages:
# Assumes self._lookup uses deepcopy.
del data['messages']
return data
def get_patchset_properties(self, issue, patchset):
return self._lookup(
'get_patchset_properties',
(issue, patchset),
super(CachingRietveld, self).get_patchset_properties)
class ReadOnlyRietveld(object):
"""
Only provides read operations, and simulates writes locally.
Intentionally do not inherit from Rietveld to avoid any write-issuing
logic to be invoked accidentally.
"""
# Dictionary of local changes, indexed by issue number as int.
_local_changes = {}
def __init__(self, *args, **kwargs):
# We still need an actual Rietveld instance to issue reads, just keep
# it hidden.
self._rietveld = Rietveld(*args, **kwargs)
@classmethod
def _get_local_changes(cls, issue):
"""Returns dictionary of local changes for |issue|, if any."""
return cls._local_changes.get(issue, {})
@property
def url(self):
return self._rietveld.url
def get_pending_issues(self):
pending_issues = self._rietveld.get_pending_issues()
# Filter out issues we've closed or unchecked the commit checkbox.
return [issue for issue in pending_issues
if not self._get_local_changes(issue).get('closed', False) and
self._get_local_changes(issue).get('commit', True)]
def close_issue(self, issue): # pylint:disable=R0201
logging.info('ReadOnlyRietveld: closing issue %d' % issue)
ReadOnlyRietveld._local_changes.setdefault(issue, {})['closed'] = True
def get_issue_properties(self, issue, messages):
data = self._rietveld.get_issue_properties(issue, messages)
data.update(self._get_local_changes(issue))
return data
def get_patchset_properties(self, issue, patchset):
return self._rietveld.get_patchset_properties(issue, patchset)
def get_depends_on_patchset(self, issue, patchset):
return self._rietveld.get_depends_on_patchset(issue, patchset)
def get_patch(self, issue, patchset):
return self._rietveld.get_patch(issue, patchset)
def update_description(self, issue, description): # pylint:disable=R0201
logging.info('ReadOnlyRietveld: new description for issue %d: %s' %
(issue, description))
def add_comment(self, # pylint:disable=R0201
issue,
message,
add_as_reviewer=False):
logging.info('ReadOnlyRietveld: posting comment "%s" to issue %d' %
(message, issue))
def set_flag(self, issue, patchset, flag, value): # pylint:disable=R0201
logging.info('ReadOnlyRietveld: setting flag "%s" to "%s" for issue %d' %
(flag, value, issue))
ReadOnlyRietveld._local_changes.setdefault(issue, {})[flag] = value
def trigger_try_jobs( # pylint:disable=R0201
self, issue, patchset, reason, clobber, revision, builders_and_tests,
master=None, category='cq'):
logging.info('ReadOnlyRietveld: triggering try jobs %r for issue %d' %
(builders_and_tests, issue))
def trigger_distributed_try_jobs( # pylint:disable=R0201
self, issue, patchset, reason, clobber, revision, masters,
category='cq'):
logging.info('ReadOnlyRietveld: triggering try jobs %r for issue %d' %
(masters, issue))
| bsd-3-clause |
abhikumar22/MYBLOG | blg/Lib/site-packages/django-1.11.7-py3.6.egg/django/contrib/gis/geos/prepared.py | 180 | 1575 | from .base import GEOSBase
from .prototypes import prepared as capi
class PreparedGeometry(GEOSBase):
"""
A geometry that is prepared for performing certain operations.
At the moment this includes the contains covers, and intersects
operations.
"""
ptr_type = capi.PREPGEOM_PTR
destructor = capi.prepared_destroy
def __init__(self, geom):
# Keeping a reference to the original geometry object to prevent it
# from being garbage collected which could then crash the prepared one
# See #21662
self._base_geom = geom
from .geometry import GEOSGeometry
if not isinstance(geom, GEOSGeometry):
raise TypeError
self.ptr = capi.geos_prepare(geom.ptr)
def contains(self, other):
return capi.prepared_contains(self.ptr, other.ptr)
def contains_properly(self, other):
return capi.prepared_contains_properly(self.ptr, other.ptr)
def covers(self, other):
return capi.prepared_covers(self.ptr, other.ptr)
def intersects(self, other):
return capi.prepared_intersects(self.ptr, other.ptr)
def crosses(self, other):
return capi.prepared_crosses(self.ptr, other.ptr)
def disjoint(self, other):
return capi.prepared_disjoint(self.ptr, other.ptr)
def overlaps(self, other):
return capi.prepared_overlaps(self.ptr, other.ptr)
def touches(self, other):
return capi.prepared_touches(self.ptr, other.ptr)
def within(self, other):
return capi.prepared_within(self.ptr, other.ptr)
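# A minimal usage sketch (not part of the original module):
#   from django.contrib.gis.geos import GEOSGeometry
#   poly = GEOSGeometry('POLYGON((0 0, 0 10, 10 10, 10 0, 0 0))')
#   prepared = PreparedGeometry(poly)   # equivalently: poly.prepared
#   prepared.contains(GEOSGeometry('POINT(5 5)'))  # True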
| gpl-3.0 |
0x27/clusterd | src/platform/tomcat/auxiliary/info_dump.py | 6 | 1972 | from src.platform.tomcat.authenticate import checkAuth
from src.platform.tomcat.interfaces import TINTERFACES
from auxiliary import Auxiliary
from log import LOG
import utility
class Auxiliary:
""" The Manager application for Tomcat has a nifty fingerprinting
app that allows us to retrieve host OS, versioning, arch, etc.
which may aid in targeting payloads.
"""
def __init__(self):
self.name = 'Gather Tomcat info'
self.versions = ['Any']
self.flag = 'tc-info'
def check(self, fingerprint):
"""
"""
if fingerprint.title == TINTERFACES.MAN:
return True
return False
def run(self, fingerengine, fingerprint):
utility.Msg("Attempting to retrieve Tomcat info...")
base = "http://{0}:{1}".format(fingerengine.options.ip,
fingerprint.port)
relative = '/manager/serverinfo'
if fingerprint.version in ["7.0", "8.0"]:
relative = '/manager/text/serverinfo'
url = base + relative
response = utility.requests_get(url)
if response.status_code == 401:
utility.Msg("Host %s:%s requires auth, checking..." %
(fingerengine.options.ip, fingerprint.port), LOG.DEBUG)
cookies = checkAuth(fingerengine.options.ip, fingerprint.port,
fingerprint.title, fingerprint.version)
if cookies:
response = utility.requests_get(url, cookies=cookies[0],
auth=cookies[1])
else:
utility.Msg("Could not get auth for %s:%s" %
(fingerengine.options.ip, fingerprint.port), LOG.ERROR)
return
if response.status_code == 200:
info = response.content.split('\n')[1:-1]
for entry in info:
utility.Msg(entry)
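# Hedged invocation note (not in the original file): this module is driven by
# clusterd's flag defined above, e.g. something like
#   ./clusterd.py -i 192.168.1.10 -a tomcat --tc-info
# The exact CLI shape is assumed; only the --tc-info flag name comes from
# self.flag.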
| mit |
josephnoir/RIOT | cpu/esp8266/vendor/esp-idf/partition_table/gen_esp32part.py | 6 | 14010 | #!/usr/bin/env python
#
# ESP32 partition table generation tool
#
# Converts partition tables to/from CSV and binary formats.
#
# See http://esp-idf.readthedocs.io/en/latest/api-guides/partition-tables.html
# for explanation of partition table structure and uses.
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import argparse
import os
import re
import struct
import sys
MAX_PARTITION_LENGTH = 0xC00 # 3K for partition data (96 entries) leaves 1K in a 4K sector for signature
__version__ = '1.0'
quiet = False
def status(msg):
""" Print status message to stderr """
if not quiet:
critical(msg)
def critical(msg):
""" Print critical message to stderr """
if not quiet:
sys.stderr.write(msg)
sys.stderr.write('\n')
class PartitionTable(list):
def __init__(self):
super(PartitionTable, self).__init__(self)
@classmethod
def from_csv(cls, csv_contents):
res = PartitionTable()
lines = csv_contents.splitlines()
def expand_vars(f):
f = os.path.expandvars(f)
m = re.match(r'(?<!\\)\$([A-Za-z_][A-Za-z0-9_]*)', f)
if m:
raise InputError("unknown variable '%s'" % m.group(1))
return f
for line_no in range(len(lines)):
line = expand_vars(lines[line_no]).strip()
if line.startswith("#") or len(line) == 0:
continue
try:
res.append(PartitionDefinition.from_csv(line))
except InputError as e:
raise InputError("Error at line %d: %s" % (line_no+1, e))
except Exception:
critical("Unexpected error parsing line %d: %s" % (line_no+1, line))
raise
# fix up missing offsets & negative sizes
last_end = 0x5000 # first offset after partition table
for e in res:
if e.offset is None:
pad_to = 0x10000 if e.type == PartitionDefinition.APP_TYPE else 4
if last_end % pad_to != 0:
last_end += pad_to - (last_end % pad_to)
e.offset = last_end
if e.size < 0:
e.size = -e.size - e.offset
last_end = e.offset + e.size
return res
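    # Hedged example of the CSV this parser accepts (column order matches the
    # header emitted by to_csv(); offsets may be left blank to be auto-filled):
    #   # Name,   Type, SubType, Offset,  Size, Flags
    #   nvs,      data, nvs,     0x9000,  0x6000,
    #   factory,  app,  factory, 0x10000, 1M,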
def __getitem__(self, item):
""" Allow partition table access via name as well as by
numeric index. """
if isinstance(item, str):
for x in self:
if x.name == item:
return x
raise ValueError("No partition entry named '%s'" % item)
else:
return super(PartitionTable, self).__getitem__(item)
def verify(self):
# verify each partition individually
for p in self:
p.verify()
# check for overlaps
last = None
for p in sorted(self, key=lambda x:x.offset):
if p.offset < 0x5000:
raise InputError("Partition offset 0x%x is below 0x5000" % p.offset)
if last is not None and p.offset < last.offset + last.size:
raise InputError("Partition at 0x%x overlaps 0x%x-0x%x" % (p.offset, last.offset, last.offset+last.size-1))
last = p
@classmethod
def from_binary(cls, b):
result = cls()
for o in range(0,len(b),32):
data = b[o:o+32]
if len(data) != 32:
raise InputError("Partition table length must be a multiple of 32 bytes")
if data == b'\xFF'*32:
return result # got end marker
result.append(PartitionDefinition.from_binary(data))
raise InputError("Partition table is missing an end-of-table marker")
def to_binary(self):
result = b"".join(e.to_binary() for e in self)
        if len(result) >= MAX_PARTITION_LENGTH:
raise InputError("Binary partition table length (%d) longer than max" % len(result))
result += b"\xFF" * (MAX_PARTITION_LENGTH - len(result)) # pad the sector, for signing
return result
def to_csv(self, simple_formatting=False):
rows = [ "# Espressif ESP32 Partition Table",
"# Name, Type, SubType, Offset, Size, Flags" ]
rows += [ x.to_csv(simple_formatting) for x in self ]
return "\n".join(rows) + "\n"
class PartitionDefinition(object):
APP_TYPE = 0x00
DATA_TYPE = 0x01
TYPES = {
"app" : APP_TYPE,
"data" : DATA_TYPE,
}
# Keep this map in sync with esp_partition_subtype_t enum in esp_partition.h
SUBTYPES = {
APP_TYPE : {
"factory" : 0x00,
"test" : 0x20,
},
DATA_TYPE : {
"ota" : 0x00,
"phy" : 0x01,
"nvs" : 0x02,
"coredump" : 0x03,
"esphttpd" : 0x80,
"fat" : 0x81,
"spiffs" : 0x82,
},
}
MAGIC_BYTES = b"\xAA\x50"
ALIGNMENT = {
APP_TYPE : 0x1000,
DATA_TYPE : 0x04,
}
# dictionary maps flag name (as used in CSV flags list, property name)
# to bit set in flags words in binary format
FLAGS = {
"encrypted" : 0
}
# add subtypes for the 16 OTA slot values ("ota_XXX, etc.")
for ota_slot in range(16):
SUBTYPES[TYPES["app"]]["ota_%d" % ota_slot] = 0x10 + ota_slot
def __init__(self):
self.name = ""
self.type = None
self.subtype = None
self.offset = None
self.size = None
self.encrypted = False
@classmethod
def from_csv(cls, line):
""" Parse a line from the CSV """
line_w_defaults = line + ",,,," # lazy way to support default fields
fields = [ f.strip() for f in line_w_defaults.split(",") ]
res = PartitionDefinition()
res.name = fields[0]
res.type = res.parse_type(fields[1])
res.subtype = res.parse_subtype(fields[2])
res.offset = res.parse_address(fields[3])
res.size = res.parse_address(fields[4])
if res.size is None:
raise InputError("Size field can't be empty")
flags = fields[5].split(":")
for flag in flags:
if flag in cls.FLAGS:
setattr(res, flag, True)
elif len(flag) > 0:
raise InputError("CSV flag column contains unknown flag '%s'" % (flag))
return res
def __eq__(self, other):
return self.name == other.name and self.type == other.type \
and self.subtype == other.subtype and self.offset == other.offset \
and self.size == other.size
def __repr__(self):
def maybe_hex(x):
return "0x%x" % x if x is not None else "None"
return "PartitionDefinition('%s', 0x%x, 0x%x, %s, %s)" % (self.name, self.type, self.subtype or 0,
maybe_hex(self.offset), maybe_hex(self.size))
def __str__(self):
return "Part '%s' %d/%d @ 0x%x size 0x%x" % (self.name, self.type, self.subtype, self.offset or -1, self.size or -1)
def __cmp__(self, other):
return self.offset - other.offset
def parse_type(self, strval):
if strval == "":
raise InputError("Field 'type' can't be left empty.")
return parse_int(strval, self.TYPES)
def parse_subtype(self, strval):
if strval == "":
return 0 # default
return parse_int(strval, self.SUBTYPES.get(self.type, {}))
def parse_address(self, strval):
if strval == "":
return None # PartitionTable will fill in default
return parse_int(strval)
def verify(self):
if self.type is None:
raise ValidationError(self, "Type field is not set")
if self.subtype is None:
raise ValidationError(self, "Subtype field is not set")
if self.offset is None:
raise ValidationError(self, "Offset field is not set")
align = self.ALIGNMENT.get(self.type, 4)
if self.offset % align:
raise ValidationError(self, "Offset 0x%x is not aligned to 0x%x" % (self.offset, align))
if self.size is None:
raise ValidationError(self, "Size field is not set")
STRUCT_FORMAT = "<2sBBLL16sL"
@classmethod
def from_binary(cls, b):
if len(b) != 32:
raise InputError("Partition definition length must be exactly 32 bytes. Got %d bytes." % len(b))
res = cls()
(magic, res.type, res.subtype, res.offset,
res.size, res.name, flags) = struct.unpack(cls.STRUCT_FORMAT, b)
if b"\x00" in res.name: # strip null byte padding from name string
res.name = res.name[:res.name.index(b"\x00")]
res.name = res.name.decode()
if magic != cls.MAGIC_BYTES:
raise InputError("Invalid magic bytes (%r) for partition definition" % magic)
for flag,bit in cls.FLAGS.items():
if flags & (1<<bit):
setattr(res, flag, True)
flags &= ~(1<<bit)
if flags != 0:
critical("WARNING: Partition definition had unknown flag(s) 0x%08x. Newer binary format?" % flags)
return res
def get_flags_list(self):
return [ flag for flag in self.FLAGS.keys() if getattr(self, flag) ]
def to_binary(self):
flags = sum((1 << self.FLAGS[flag]) for flag in self.get_flags_list())
return struct.pack(self.STRUCT_FORMAT,
self.MAGIC_BYTES,
self.type, self.subtype,
self.offset, self.size,
self.name.encode(),
flags)
def to_csv(self, simple_formatting=False):
def addr_format(a, include_sizes):
if not simple_formatting and include_sizes:
for (val, suffix) in [ (0x100000, "M"), (0x400, "K") ]:
if a % val == 0:
return "%d%s" % (a // val, suffix)
return "0x%x" % a
def lookup_keyword(t, keywords):
for k,v in keywords.items():
if simple_formatting == False and t == v:
return k
return "%d" % t
def generate_text_flags():
""" colon-delimited list of flags """
return ":".join(self.get_flags_list())
return ",".join([ self.name,
lookup_keyword(self.type, self.TYPES),
lookup_keyword(self.subtype, self.SUBTYPES.get(self.type, {})),
addr_format(self.offset, False),
addr_format(self.size, True),
generate_text_flags()])
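    # Example addr_format outputs, derived from the suffix table above: sizes
    # 0x100000 -> "1M", 0x6000 -> "24K", 0x1100 -> "0x1100"; offsets are
    # always rendered as plain hex because include_sizes is False for them.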
def parse_int(v, keywords={}):
"""Generic parser for integer fields - int(x,0) with provision for
k/m/K/M suffixes and 'keyword' value lookup.
"""
try:
for letter, multiplier in [ ("k",1024), ("m",1024*1024) ]:
if v.lower().endswith(letter):
return parse_int(v[:-1], keywords) * multiplier
return int(v, 0)
except ValueError:
if len(keywords) == 0:
raise InputError("Invalid field value %s" % v)
try:
return keywords[v.lower()]
except KeyError:
raise InputError("Value '%s' is not valid. Known keywords: %s" % (v, ", ".join(keywords)))
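# Examples of parse_int behaviour, derivable from the code above:
#   parse_int("0x9000")          -> 36864
#   parse_int("1M")              -> 1048576
#   parse_int("64k")             -> 65536
#   parse_int("app", {"app": 0}) -> 0   (keyword lookup is case-insensitive)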
def main():
global quiet
parser = argparse.ArgumentParser(description='ESP32 partition table utility')
    parser.add_argument('--no-verify', help="Don't verify partition table fields", dest='verify', default=True, action='store_false')
parser.add_argument('--quiet', '-q', help="Don't print status messages to stderr", action='store_true')
    parser.add_argument('input', help='Path to CSV or binary file to parse. Will use stdin if omitted.', type=argparse.FileType('rb'), nargs='?', default=sys.stdin)
    parser.add_argument('output', help='Path to output converted binary or CSV file. Will use stdout if omitted.',
                        nargs='?',
                        default='-')
args = parser.parse_args()
quiet = args.quiet
input = args.input.read()
input_is_binary = input[0:2] == PartitionDefinition.MAGIC_BYTES
if input_is_binary:
status("Parsing binary partition input...")
table = PartitionTable.from_binary(input)
else:
input = input.decode()
status("Parsing CSV input...")
table = PartitionTable.from_csv(input)
if args.verify:
status("Verifying table...")
table.verify()
if input_is_binary:
output = table.to_csv()
with sys.stdout if args.output == '-' else open(args.output, 'w') as f:
f.write(output)
else:
output = table.to_binary()
with sys.stdout.buffer if args.output == '-' else open(args.output, 'wb') as f:
f.write(output)
class InputError(RuntimeError):
def __init__(self, e):
super(InputError, self).__init__(e)
class ValidationError(InputError):
def __init__(self, partition, message):
super(ValidationError, self).__init__(
"Partition %s invalid: %s" % (partition.name, message))
if __name__ == '__main__':
try:
main()
except InputError as e:
print(e, file=sys.stderr)
sys.exit(2)
| lgpl-2.1 |
xapi-project/xen-api-sdk | python/samples/vm_start_async.py | 8 | 3389 | #!/usr/bin/env python
# Copyright (c) Citrix Systems, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2) Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# Simple example to demonstrate how to use an asynchronous operation,
# namely the asynchronous version of the VM start method.
# The example assumes the presence of a halted VM, starts
# the VM, retrieves the task reference and queries the task's status.
import pprint
import time
import sys
import XenAPI
def main(session):
print "Looking for a halted VM..."
vms = session.xenapi.VM.get_all_records()
vm = None
for vm_ref in vms:
vm_rec = vms[vm_ref]
if not vm_rec['is_a_template'] and not vm_rec['is_control_domain']\
and vm_rec["power_state"] == "Halted":
print "Found it:"
pprint.pprint(vm_rec)
vm = vm_ref
break
if vm is None:
print "Unable to find a halted VM"
return
print "Attempting to start the halted VM asynchronously"
task = session.xenapi.Async.VM.start(vm, False, True)
task_record = session.xenapi.task.get_record(task)
print "The initial contents of the task record:"
pprint.pprint(task_record)
print "Waiting for the task to complete..."
while session.xenapi.task.get_status(task) == "pending":
time.sleep(1)
task_record = session.xenapi.task.get_record(task)
print "The final contents of the task record:"
pprint.pprint(task_record)
if __name__ == "__main__":
if len(sys.argv) != 4:
print "Usage:"
print sys.argv[0], " <url> <username> <password>"
sys.exit(1)
url = sys.argv[1]
username = sys.argv[2]
password = sys.argv[3]
# First acquire a valid session by logging in:
new_session = XenAPI.Session(url)
try:
new_session.xenapi.login_with_password(username, password, "1.0", "xen-api-scripts-vm-start-async.py")
except XenAPI.Failure as f:
print "Failed to acquire a session: %s" % f.details
sys.exit(1)
try:
main(new_session)
finally:
new_session.xenapi.session.logout()
| bsd-2-clause |
D-L/SimpleBookMarks | src/tornado/auth.py | 15 | 57309 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module contains implementations of various third-party
authentication schemes.
All the classes in this file are class mixins designed to be used with
the `tornado.web.RequestHandler` class. They are used in two ways:
* On a login handler, use methods such as ``authenticate_redirect()``,
``authorize_redirect()``, and ``get_authenticated_user()`` to
establish the user's identity and store authentication tokens to your
database and/or cookies.
* In non-login handlers, use methods such as ``facebook_request()``
or ``twitter_request()`` to use the authentication tokens to make
requests to the respective services.
They all take slightly different arguments because all these
services implement authentication and authorization slightly differently.
See the individual service classes below for complete documentation.
Example usage for Google OpenID::
class GoogleLoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleMixin):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
if self.get_argument("openid.mode", None):
user = yield self.get_authenticated_user()
# Save the user with e.g. set_secure_cookie()
else:
yield self.authenticate_redirect()
"""
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
import functools
import hashlib
import hmac
import time
import uuid
from tornado.concurrent import Future, chain_future, return_future
from tornado import gen
from tornado import httpclient
from tornado import escape
from tornado.httputil import url_concat
from tornado.log import gen_log
from tornado.util import bytes_type, u, unicode_type, ArgReplacer
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
class AuthError(Exception):
pass
def _auth_future_to_callback(callback, future):
try:
result = future.result()
except AuthError as e:
gen_log.warning(str(e))
result = None
callback(result)
def _auth_return_future(f):
"""Similar to tornado.concurrent.return_future, but uses the auth
module's legacy callback interface.
Note that when using this decorator the ``callback`` parameter
inside the function will actually be a future.
"""
replacer = ArgReplacer(f, 'callback')
@functools.wraps(f)
def wrapper(*args, **kwargs):
future = Future()
callback, args, kwargs = replacer.replace(future, args, kwargs)
if callback is not None:
future.add_done_callback(
functools.partial(_auth_future_to_callback, callback))
f(*args, **kwargs)
return future
return wrapper
class OpenIdMixin(object):
"""Abstract implementation of OpenID and Attribute Exchange.
See `GoogleMixin` below for a customized example (which also
includes OAuth support).
Class attributes:
* ``_OPENID_ENDPOINT``: the identity provider's URI.
"""
@return_future
def authenticate_redirect(self, callback_uri=None,
ax_attrs=["name", "email", "language", "username"],
callback=None):
"""Redirects to the authentication URL for this service.
After authentication, the service will redirect back to the given
callback URI with additional parameters including ``openid.mode``.
We request the given attributes for the authenticated user by
default (name, email, language, and username). If you don't need
all those attributes for your app, you can request fewer with
the ax_attrs keyword argument.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
callback()
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Fetches the authenticated user data upon redirect.
This method should be called by the handler that receives the
redirect from the `authenticate_redirect()` method (which is
often the same as the one that calls it; in that case you would
call `get_authenticated_user` if the ``openid.mode`` parameter
is present and `authenticate_redirect` if it is not).
The result of this method will generally be used to set a cookie.
"""
# Verify the OpenID response via direct request to the OP
args = dict((k, v[-1]) for k, v in self.request.arguments.items())
args["openid.mode"] = u("check_authentication")
url = self._OPENID_ENDPOINT
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(url, self.async_callback(
self._on_authentication_verified, callback),
method="POST", body=urllib_parse.urlencode(args))
def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
url = urlparse.urljoin(self.request.full_url(), callback_uri)
args = {
"openid.ns": "http://specs.openid.net/auth/2.0",
"openid.claimed_id":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.identity":
"http://specs.openid.net/auth/2.0/identifier_select",
"openid.return_to": url,
"openid.realm": urlparse.urljoin(url, '/'),
"openid.mode": "checkid_setup",
}
if ax_attrs:
args.update({
"openid.ns.ax": "http://openid.net/srv/ax/1.0",
"openid.ax.mode": "fetch_request",
})
ax_attrs = set(ax_attrs)
required = []
if "name" in ax_attrs:
ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
required += ["firstname", "fullname", "lastname"]
args.update({
"openid.ax.type.firstname":
"http://axschema.org/namePerson/first",
"openid.ax.type.fullname":
"http://axschema.org/namePerson",
"openid.ax.type.lastname":
"http://axschema.org/namePerson/last",
})
known_attrs = {
"email": "http://axschema.org/contact/email",
"language": "http://axschema.org/pref/language",
"username": "http://axschema.org/namePerson/friendly",
}
for name in ax_attrs:
args["openid.ax.type." + name] = known_attrs[name]
required.append(name)
args["openid.ax.required"] = ",".join(required)
if oauth_scope:
args.update({
"openid.ns.oauth":
"http://specs.openid.net/extensions/oauth/1.0",
"openid.oauth.consumer": self.request.host.split(":")[0],
"openid.oauth.scope": oauth_scope,
})
return args
def _on_authentication_verified(self, future, response):
if response.error or b"is_valid:true" not in response.body:
future.set_exception(AuthError(
"Invalid OpenID response: %s" % (response.error or
response.body)))
return
# Make sure we got back at least an email from attribute exchange
ax_ns = None
for name in self.request.arguments:
if name.startswith("openid.ns.") and \
self.get_argument(name) == u("http://openid.net/srv/ax/1.0"):
ax_ns = name[10:]
break
def get_ax_arg(uri):
if not ax_ns:
return u("")
prefix = "openid." + ax_ns + ".type."
ax_name = None
for name in self.request.arguments.keys():
if self.get_argument(name) == uri and name.startswith(prefix):
part = name[len(prefix):]
ax_name = "openid." + ax_ns + ".value." + part
break
if not ax_name:
return u("")
return self.get_argument(ax_name, u(""))
email = get_ax_arg("http://axschema.org/contact/email")
name = get_ax_arg("http://axschema.org/namePerson")
first_name = get_ax_arg("http://axschema.org/namePerson/first")
last_name = get_ax_arg("http://axschema.org/namePerson/last")
username = get_ax_arg("http://axschema.org/namePerson/friendly")
locale = get_ax_arg("http://axschema.org/pref/language").lower()
user = dict()
name_parts = []
if first_name:
user["first_name"] = first_name
name_parts.append(first_name)
if last_name:
user["last_name"] = last_name
name_parts.append(last_name)
if name:
user["name"] = name
elif name_parts:
user["name"] = u(" ").join(name_parts)
elif email:
user["name"] = email.split("@")[0]
if email:
user["email"] = email
if locale:
user["locale"] = locale
if username:
user["username"] = username
claimed_id = self.get_argument("openid.claimed_id", None)
if claimed_id:
user["claimed_id"] = claimed_id
future.set_result(user)
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class OAuthMixin(object):
"""Abstract implementation of OAuth 1.0 and 1.0a.
See `TwitterMixin` and `FriendFeedMixin` below for example implementations,
or `GoogleMixin` for an OAuth/OpenID hybrid.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url.
* ``_OAUTH_VERSION``: May be either "1.0" or "1.0a".
* ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires
advance registration of callbacks.
Subclasses must also override the `_oauth_get_user_future` and
`_oauth_consumer_token` methods.
"""
@return_future
def authorize_redirect(self, callback_uri=None, extra_params=None,
http_client=None, callback=None):
"""Redirects the user to obtain OAuth authorization for this service.
The ``callback_uri`` may be omitted if you have previously
registered a callback URI with the third-party service. For
some services (including FriendFeed), you must use a
previously-registered callback URI and cannot specify a
callback via this method.
This method sets a cookie called ``_oauth_request_token`` which is
subsequently used (and cleared) in `get_authenticated_user` for
security purposes.
Note that this method is asynchronous, although it calls
`.RequestHandler.finish` for you so it may not be necessary
to pass a callback or use the `.Future` it returns. However,
if this method is called from a function decorated with
`.gen.coroutine`, you must call it with ``yield`` to keep the
response from being closed prematurely.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback")
if http_client is None:
http_client = self.get_auth_http_client()
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
http_client.fetch(
self._oauth_request_token_url(callback_uri=callback_uri,
extra_params=extra_params),
self.async_callback(
self._on_request_token,
self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
else:
http_client.fetch(
self._oauth_request_token_url(),
self.async_callback(
self._on_request_token, self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Gets the OAuth authorized user and access token.
This method should be called from the handler for your
OAuth callback URL to complete the registration process. We run the
callback with the authenticated user dictionary. This dictionary
will contain an ``access_key`` which can be used to make authorized
requests to this service on behalf of the user. The dictionary will
also contain other fields such as ``name``, depending on the service
used.
"""
future = callback
request_key = escape.utf8(self.get_argument("oauth_token"))
oauth_verifier = self.get_argument("oauth_verifier", None)
request_cookie = self.get_cookie("_oauth_request_token")
if not request_cookie:
future.set_exception(AuthError(
"Missing OAuth request token cookie"))
return
self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
if cookie_key != request_key:
future.set_exception(AuthError(
"Request token does not match cookie"))
return
token = dict(key=cookie_key, secret=cookie_secret)
if oauth_verifier:
token["verifier"] = oauth_verifier
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(self._oauth_access_token_url(token),
self.async_callback(self._on_access_token, callback))
def _oauth_request_token_url(self, callback_uri=None, extra_params=None):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_REQUEST_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
if callback_uri == "oob":
args["oauth_callback"] = "oob"
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
if extra_params:
args.update(extra_params)
signature = _oauth10a_signature(consumer_token, "GET", url, args)
else:
signature = _oauth_signature(consumer_token, "GET", url, args)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_request_token(self, authorize_url, callback_uri, callback,
response):
if response.error:
raise Exception("Could not get request token: %s" % response.error)
request_token = _oauth_parse_response(response.body)
data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" +
base64.b64encode(escape.utf8(request_token["secret"])))
self.set_cookie("_oauth_request_token", data)
args = dict(oauth_token=request_token["key"])
if callback_uri == "oob":
self.finish(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
return
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
self.redirect(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
def _oauth_access_token_url(self, request_token):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(request_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if "verifier" in request_token:
args["oauth_verifier"] = request_token["verifier"]
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, "GET", url, args,
request_token)
else:
signature = _oauth_signature(consumer_token, "GET", url, args,
request_token)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_access_token(self, future, response):
if response.error:
future.set_exception(AuthError("Could not fetch access token"))
return
access_token = _oauth_parse_response(response.body)
self._oauth_get_user_future(access_token).add_done_callback(
self.async_callback(self._on_oauth_get_user, access_token, future))
def _oauth_consumer_token(self):
"""Subclasses must override this to return their OAuth consumer keys.
The return value should be a `dict` with keys ``key`` and ``secret``.
"""
raise NotImplementedError()
@return_future
def _oauth_get_user_future(self, access_token, callback):
"""Subclasses must override this to get basic information about the
user.
Should return a `.Future` whose result is a dictionary
containing information about the user, which may have been
retrieved by using ``access_token`` to make a request to the
service.
The access token will be added to the returned dictionary to make
the result of `get_authenticated_user`.
For backwards compatibility, the callback-based ``_oauth_get_user``
method is also supported.
"""
# By default, call the old-style _oauth_get_user, but new code
# should override this method instead.
self._oauth_get_user(access_token, callback)
def _oauth_get_user(self, access_token, callback):
raise NotImplementedError()
def _on_oauth_get_user(self, access_token, future, user_future):
if user_future.exception() is not None:
future.set_exception(user_future.exception())
return
user = user_future.result()
if not user:
future.set_exception(AuthError("Error getting user"))
return
user["access_token"] = access_token
future.set_result(user)
def _oauth_request_parameters(self, url, access_token, parameters={},
method="GET"):
"""Returns the OAuth parameters as a dict for the given request.
parameters should include all POST arguments and query string arguments
that will be sent with the request.
"""
consumer_token = self._oauth_consumer_token()
base_args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(access_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
args = {}
args.update(base_args)
args.update(parameters)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, method, url, args,
access_token)
else:
signature = _oauth_signature(consumer_token, method, url, args,
access_token)
base_args["oauth_signature"] = escape.to_basestring(signature)
return base_args
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class OAuth2Mixin(object):
"""Abstract implementation of OAuth 2.0.
See `FacebookGraphMixin` below for an example implementation.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url.
"""
@return_future
def authorize_redirect(self, redirect_uri=None, client_id=None,
client_secret=None, extra_params=None,
callback=None):
"""Redirects the user to obtain OAuth authorization for this service.
Some providers require that you register a redirect URL with
your application instead of passing one via this method. You
should call this method to log the user in, and then call
``get_authenticated_user`` in the handler for your
redirect URL to complete the authorization process.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
args = {
"redirect_uri": redirect_uri,
"client_id": client_id
}
if extra_params:
args.update(extra_params)
self.redirect(
url_concat(self._OAUTH_AUTHORIZE_URL, args))
callback()
def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
client_secret=None, code=None,
extra_params=None):
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
redirect_uri=redirect_uri,
code=code,
client_id=client_id,
client_secret=client_secret,
)
if extra_params:
args.update(extra_params)
return url_concat(url, args)
class TwitterMixin(OAuthMixin):
"""Twitter OAuth authentication.
To authenticate with Twitter, register your application with
Twitter at http://twitter.com/apps. Then copy your Consumer Key
and Consumer Secret to the application
`~tornado.web.Application.settings` ``twitter_consumer_key`` and
``twitter_consumer_secret``. Use this mixin on the handler for the
URL you registered as your application's callback URL.
When your application is set up, you can use this mixin like this
to authenticate the user with Twitter and get access to their stream::
class TwitterLoginHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
if self.get_argument("oauth_token", None):
user = yield self.get_authenticated_user()
# Save the user using e.g. set_secure_cookie()
else:
yield self.authorize_redirect()
The user object returned by `~OAuthMixin.get_authenticated_user`
includes the attributes ``username``, ``name``, ``access_token``,
and all of the custom Twitter user attributes described at
https://dev.twitter.com/docs/api/1.1/get/users/show
"""
_OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
_OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate"
_OAUTH_NO_CALLBACKS = False
_TWITTER_BASE_URL = "https://api.twitter.com/1.1"
@return_future
def authenticate_redirect(self, callback_uri=None, callback=None):
"""Just like `~OAuthMixin.authorize_redirect`, but
auto-redirects if authorized.
This is generally the right interface to use if you are using
Twitter for single-sign on.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
http = self.get_auth_http_client()
http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
self.async_callback(
self._on_request_token, self._OAUTH_AUTHENTICATE_URL,
None, callback))
@_auth_return_future
def twitter_request(self, path, callback=None, access_token=None,
post_args=None, **args):
"""Fetches the given API path, e.g., ``statuses/user_timeline/btaylor``
The path should not include the format or API version number.
(we automatically use JSON format and API version 1.1).
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
All the Twitter methods are documented at http://dev.twitter.com/
Many methods require an OAuth access token which you can
obtain through `~OAuthMixin.authorize_redirect` and
`~OAuthMixin.get_authenticated_user`. The user returned through that
process includes an 'access_token' attribute that can be used
to make authenticated requests via this method. Example
usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.TwitterMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
new_entry = yield self.twitter_request(
"/statuses/update",
post_args={"status": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
"""
if path.startswith('http:') or path.startswith('https:'):
# Raw urls are useful for e.g. search which doesn't follow the
# usual pattern: http://search.twitter.com/search.json
url = path
else:
url = self._TWITTER_BASE_URL + path + ".json"
# Add the OAuth resource request signature if we have credentials
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args:
url += "?" + urllib_parse.urlencode(args)
http = self.get_auth_http_client()
http_callback = self.async_callback(self._on_twitter_request, callback)
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=http_callback)
else:
http.fetch(url, callback=http_callback)
def _on_twitter_request(self, future, response):
if response.error:
future.set_exception(AuthError(
"Error response %s fetching %s" % (response.error,
response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("twitter_consumer_key", "Twitter OAuth")
self.require_setting("twitter_consumer_secret", "Twitter OAuth")
return dict(
key=self.settings["twitter_consumer_key"],
secret=self.settings["twitter_consumer_secret"])
@gen.coroutine
def _oauth_get_user_future(self, access_token):
user = yield self.twitter_request(
"/account/verify_credentials",
access_token=access_token)
if user:
user["username"] = user["screen_name"]
raise gen.Return(user)
class FriendFeedMixin(OAuthMixin):
"""FriendFeed OAuth authentication.
To authenticate with FriendFeed, register your application with
FriendFeed at http://friendfeed.com/api/applications. Then copy
your Consumer Key and Consumer Secret to the application
`~tornado.web.Application.settings` ``friendfeed_consumer_key``
and ``friendfeed_consumer_secret``. Use this mixin on the handler
for the URL you registered as your application's Callback URL.
When your application is set up, you can use this mixin like this
to authenticate the user with FriendFeed and get access to their feed::
class FriendFeedLoginHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
if self.get_argument("oauth_token", None):
user = yield self.get_authenticated_user()
# Save the user using e.g. set_secure_cookie()
else:
yield self.authorize_redirect()
The user object returned by `~OAuthMixin.get_authenticated_user()` includes the
attributes ``username``, ``name``, and ``description`` in addition to
``access_token``. You should save the access token with the user;
it is required to make requests on behalf of the user later with
`friendfeed_request()`.
"""
_OAUTH_VERSION = "1.0"
_OAUTH_REQUEST_TOKEN_URL = "https://friendfeed.com/account/oauth/request_token"
_OAUTH_ACCESS_TOKEN_URL = "https://friendfeed.com/account/oauth/access_token"
_OAUTH_AUTHORIZE_URL = "https://friendfeed.com/account/oauth/authorize"
_OAUTH_NO_CALLBACKS = True
_OAUTH_VERSION = "1.0"
@_auth_return_future
def friendfeed_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/bret/friends"
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
All the FriendFeed methods are documented at
http://friendfeed.com/api/documentation.
Many methods require an OAuth access token which you can
obtain through `~OAuthMixin.authorize_redirect` and
`~OAuthMixin.get_authenticated_user`. The user returned
through that process includes an ``access_token`` attribute that
can be used to make authenticated requests via this
method.
Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FriendFeedMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
new_entry = yield self.friendfeed_request(
"/entry",
post_args={"body": "Testing Tornado Web Server"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
"""
# Add the OAuth resource request signature if we have credentials
url = "http://friendfeed-api.com/v2" + path
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = "POST" if post_args is not None else "GET"
oauth = self._oauth_request_parameters(
url, access_token, all_args, method=method)
args.update(oauth)
if args:
url += "?" + urllib_parse.urlencode(args)
callback = self.async_callback(self._on_friendfeed_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_friendfeed_request(self, future, response):
if response.error:
future.set_exception(AuthError(
"Error response %s fetching %s" % (response.error,
response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def _oauth_consumer_token(self):
self.require_setting("friendfeed_consumer_key", "FriendFeed OAuth")
self.require_setting("friendfeed_consumer_secret", "FriendFeed OAuth")
return dict(
key=self.settings["friendfeed_consumer_key"],
secret=self.settings["friendfeed_consumer_secret"])
@gen.coroutine
def _oauth_get_user_future(self, access_token, callback):
user = yield self.friendfeed_request(
"/feedinfo/" + access_token["username"],
include="id,name,description", access_token=access_token)
if user:
user["username"] = user["id"]
callback(user)
def _parse_user_response(self, callback, user):
if user:
user["username"] = user["id"]
callback(user)
class GoogleMixin(OpenIdMixin, OAuthMixin):
"""Google Open ID / OAuth authentication.
No application registration is necessary to use Google for
authentication or to access Google resources on behalf of a user.
Google implements both OpenID and OAuth in a hybrid mode. If you
just need the user's identity, use
`~OpenIdMixin.authenticate_redirect`. If you need to make
requests to Google on behalf of the user, use
`authorize_redirect`. On return, parse the response with
`~OpenIdMixin.get_authenticated_user`. We send a dict containing
the values for the user, including ``email``, ``name``, and
``locale``.
Example usage::
class GoogleLoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleMixin):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
if self.get_argument("openid.mode", None):
user = yield self.get_authenticated_user()
# Save the user with e.g. set_secure_cookie()
else:
yield self.authenticate_redirect()
"""
_OPENID_ENDPOINT = "https://www.google.com/accounts/o8/ud"
_OAUTH_ACCESS_TOKEN_URL = "https://www.google.com/accounts/OAuthGetAccessToken"
@return_future
def authorize_redirect(self, oauth_scope, callback_uri=None,
ax_attrs=["name", "email", "language", "username"],
callback=None):
"""Authenticates and authorizes for the given Google resource.
Some of the available resources which can be used in the ``oauth_scope``
argument are:
* Gmail Contacts - http://www.google.com/m8/feeds/
* Calendar - http://www.google.com/calendar/feeds/
* Finance - http://finance.google.com/finance/feeds/
You can authorize multiple resources by separating the resource
URLs with a space.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
callback_uri = callback_uri or self.request.uri
args = self._openid_args(callback_uri, ax_attrs=ax_attrs,
oauth_scope=oauth_scope)
self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
callback()
@_auth_return_future
def get_authenticated_user(self, callback):
"""Fetches the authenticated user data upon redirect."""
# Look to see if we are doing combined OpenID/OAuth
oauth_ns = ""
for name, values in self.request.arguments.items():
if name.startswith("openid.ns.") and \
values[-1] == b"http://specs.openid.net/extensions/oauth/1.0":
oauth_ns = name[10:]
break
token = self.get_argument("openid." + oauth_ns + ".request_token", "")
if token:
http = self.get_auth_http_client()
token = dict(key=token, secret="")
http.fetch(self._oauth_access_token_url(token),
self.async_callback(self._on_access_token, callback))
else:
chain_future(OpenIdMixin.get_authenticated_user(self),
callback)
def _oauth_consumer_token(self):
self.require_setting("google_consumer_key", "Google OAuth")
self.require_setting("google_consumer_secret", "Google OAuth")
return dict(
key=self.settings["google_consumer_key"],
secret=self.settings["google_consumer_secret"])
def _oauth_get_user_future(self, access_token):
return OpenIdMixin.get_authenticated_user(self)
class FacebookMixin(object):
"""Facebook Connect authentication.
*Deprecated:* New applications should use `FacebookGraphMixin`
below instead of this class. This class does not support the
Future-based interface seen on other classes in this module.
To authenticate with Facebook, register your application with
Facebook at http://www.facebook.com/developers/apps.php. Then
copy your API Key and Application Secret to the application settings
``facebook_api_key`` and ``facebook_secret``.
When your application is set up, you can use this mixin like this
to authenticate the user with Facebook::
class FacebookHandler(tornado.web.RequestHandler,
tornado.auth.FacebookMixin):
@tornado.web.asynchronous
def get(self):
if self.get_argument("session", None):
self.get_authenticated_user(self.async_callback(self._on_auth))
return
            self.authenticate_redirect()
def _on_auth(self, user):
if not user:
raise tornado.web.HTTPError(500, "Facebook auth failed")
# Save the user using, e.g., set_secure_cookie()
The user object returned by `get_authenticated_user` includes the
attributes ``facebook_uid`` and ``name`` in addition to session attributes
like ``session_key``. You should save the session key with the user; it is
required to make requests on behalf of the user later with
`facebook_request`.
"""
@return_future
def authenticate_redirect(self, callback_uri=None, cancel_uri=None,
extended_permissions=None, callback=None):
"""Authenticates/installs this app for the current user.
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
self.require_setting("facebook_api_key", "Facebook Connect")
callback_uri = callback_uri or self.request.uri
args = {
"api_key": self.settings["facebook_api_key"],
"v": "1.0",
"fbconnect": "true",
"display": "page",
"next": urlparse.urljoin(self.request.full_url(), callback_uri),
"return_session": "true",
}
if cancel_uri:
args["cancel_url"] = urlparse.urljoin(
self.request.full_url(), cancel_uri)
if extended_permissions:
if isinstance(extended_permissions, (unicode_type, bytes_type)):
extended_permissions = [extended_permissions]
args["req_perms"] = ",".join(extended_permissions)
self.redirect("http://www.facebook.com/login.php?" +
urllib_parse.urlencode(args))
callback()
def authorize_redirect(self, extended_permissions, callback_uri=None,
cancel_uri=None, callback=None):
"""Redirects to an authorization request for the given FB resource.
The available resource names are listed at
http://wiki.developers.facebook.com/index.php/Extended_permission.
The most common resource types include:
* publish_stream
* read_stream
* email
* sms
extended_permissions can be a single permission name or a list of
names. To get the session secret and session key, call
get_authenticated_user() just as you would with
authenticate_redirect().
.. versionchanged:: 3.1
Returns a `.Future` and takes an optional callback. These are
not strictly necessary as this method is synchronous,
but they are supplied for consistency with
`OAuthMixin.authorize_redirect`.
"""
return self.authenticate_redirect(callback_uri, cancel_uri,
extended_permissions,
callback=callback)
def get_authenticated_user(self, callback):
"""Fetches the authenticated Facebook user.
The authenticated user includes the special Facebook attributes
'session_key' and 'facebook_uid' in addition to the standard
user attributes like 'name'.
"""
self.require_setting("facebook_api_key", "Facebook Connect")
session = escape.json_decode(self.get_argument("session"))
self.facebook_request(
method="facebook.users.getInfo",
callback=self.async_callback(
self._on_get_user_info, callback, session),
session_key=session["session_key"],
uids=session["uid"],
fields="uid,first_name,last_name,name,locale,pic_square,"
"profile_url,username")
def facebook_request(self, method, callback, **args):
"""Makes a Facebook API REST request.
We automatically include the Facebook API key and signature, but
    it is the caller's responsibility to include 'session_key' and any
other required arguments to the method.
The available Facebook methods are documented here:
http://wiki.developers.facebook.com/index.php/API
Here is an example for the stream.get() method::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
def get(self):
self.facebook_request(
method="stream.get",
callback=self.async_callback(self._on_stream),
session_key=self.current_user["session_key"])
def _on_stream(self, stream):
if stream is None:
# Not authorized to read the stream yet?
self.redirect(self.authorize_redirect("read_stream"))
return
self.render("stream.html", stream=stream)
"""
self.require_setting("facebook_api_key", "Facebook Connect")
self.require_setting("facebook_secret", "Facebook Connect")
if not method.startswith("facebook."):
method = "facebook." + method
args["api_key"] = self.settings["facebook_api_key"]
args["v"] = "1.0"
args["method"] = method
args["call_id"] = str(long(time.time() * 1e6))
args["format"] = "json"
args["sig"] = self._signature(args)
url = "http://api.facebook.com/restserver.php?" + \
urllib_parse.urlencode(args)
http = self.get_auth_http_client()
http.fetch(url, callback=self.async_callback(
self._parse_response, callback))
def _on_get_user_info(self, callback, session, users):
if users is None:
callback(None)
return
callback({
"name": users[0]["name"],
"first_name": users[0]["first_name"],
"last_name": users[0]["last_name"],
"uid": users[0]["uid"],
"locale": users[0]["locale"],
"pic_square": users[0]["pic_square"],
"profile_url": users[0]["profile_url"],
"username": users[0].get("username"),
"session_key": session["session_key"],
"session_expires": session.get("expires"),
})
def _parse_response(self, callback, response):
if response.error:
gen_log.warning("HTTP error from Facebook: %s", response.error)
callback(None)
return
try:
json = escape.json_decode(response.body)
except Exception:
gen_log.warning("Invalid JSON from Facebook: %r", response.body)
callback(None)
return
if isinstance(json, dict) and json.get("error_code"):
gen_log.warning("Facebook error: %d: %r", json["error_code"],
json.get("error_msg"))
callback(None)
return
callback(json)
def _signature(self, args):
parts = ["%s=%s" % (n, args[n]) for n in sorted(args.keys())]
body = "".join(parts) + self.settings["facebook_secret"]
if isinstance(body, unicode_type):
body = body.encode("utf-8")
return hashlib.md5(body).hexdigest()
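    # Worked example with placeholder values: args {"a": "1", "b": "2"} and
    # facebook_secret "s" produce md5("a=1b=2s"): keys sorted, "k=v" pairs
    # concatenated, secret appended.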
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
class FacebookGraphMixin(OAuth2Mixin):
"""Facebook authentication using the new Graph API and OAuth2."""
_OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
_OAUTH_AUTHORIZE_URL = "https://graph.facebook.com/oauth/authorize?"
_OAUTH_NO_CALLBACKS = False
_FACEBOOK_BASE_URL = "https://graph.facebook.com"
@_auth_return_future
def get_authenticated_user(self, redirect_uri, client_id, client_secret,
code, callback, extra_fields=None):
"""Handles the login for the Facebook user, returning a user object.
Example usage::
class FacebookGraphLoginHandler(LoginHandler, tornado.auth.FacebookGraphMixin):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
if self.get_argument("code", False):
user = yield self.get_authenticated_user(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
client_secret=self.settings["facebook_secret"],
code=self.get_argument("code"))
# Save the user with e.g. set_secure_cookie
else:
yield self.authorize_redirect(
redirect_uri='/auth/facebookgraph/',
client_id=self.settings["facebook_api_key"],
extra_params={"scope": "read_stream,offline_access"})
"""
http = self.get_auth_http_client()
args = {
"redirect_uri": redirect_uri,
"code": code,
"client_id": client_id,
"client_secret": client_secret,
}
fields = set(['id', 'name', 'first_name', 'last_name',
'locale', 'picture', 'link'])
if extra_fields:
fields.update(extra_fields)
http.fetch(self._oauth_request_token_url(**args),
self.async_callback(self._on_access_token, redirect_uri, client_id,
client_secret, callback, fields))
def _on_access_token(self, redirect_uri, client_id, client_secret,
future, fields, response):
if response.error:
future.set_exception(AuthError('Facebook auth error: %s' % str(response)))
return
args = escape.parse_qs_bytes(escape.native_str(response.body))
session = {
"access_token": args["access_token"][-1],
"expires": args.get("expires")
}
self.facebook_request(
path="/me",
callback=self.async_callback(
self._on_get_user_info, future, session, fields),
access_token=session["access_token"],
fields=",".join(fields)
)
def _on_get_user_info(self, future, session, fields, user):
if user is None:
future.set_result(None)
return
fieldmap = {}
for field in fields:
fieldmap[field] = user.get(field)
fieldmap.update({"access_token": session["access_token"], "session_expires": session.get("expires")})
future.set_result(fieldmap)
@_auth_return_future
def facebook_request(self, path, callback, access_token=None,
post_args=None, **args):
"""Fetches the given relative API path, e.g., "/btaylor/picture"
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
An introduction to the Facebook Graph API can be found at
http://developers.facebook.com/docs/api
Many methods require an OAuth access token which you can
obtain through `~OAuth2Mixin.authorize_redirect` and
`get_authenticated_user`. The user returned through that
process includes an ``access_token`` attribute that can be
used to make authenticated requests via this method.
Example usage::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
new_entry = yield self.facebook_request(
"/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
yield self.authorize_redirect()
return
self.finish("Posted a message!")
The given path is relative to ``self._FACEBOOK_BASE_URL``,
by default "https://graph.facebook.com".
.. versionchanged:: 3.1
Added the ability to override ``self._FACEBOOK_BASE_URL``.
"""
url = self._FACEBOOK_BASE_URL + path
all_args = {}
if access_token:
all_args["access_token"] = access_token
all_args.update(args)
if all_args:
url += "?" + urllib_parse.urlencode(all_args)
callback = self.async_callback(self._on_facebook_request, callback)
http = self.get_auth_http_client()
if post_args is not None:
http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
callback=callback)
else:
http.fetch(url, callback=callback)
def _on_facebook_request(self, future, response):
if response.error:
future.set_exception(AuthError("Error response %s fetching %s" %
(response.error, response.request.url)))
return
future.set_result(escape.json_decode(response.body))
def get_auth_http_client(self):
"""Returns the `.AsyncHTTPClient` instance to be used for auth requests.
May be overridden by subclasses to use an HTTP client other than
the default.
"""
return httpclient.AsyncHTTPClient()
def _oauth_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth signature for the given request.
See http://oauth.net/core/1.0/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(consumer_token["secret"])]
key_elems.append(escape.utf8(token["secret"] if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
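# For example, signing GET https://example.com/req with the single parameter
# oauth_nonce=abc (placeholder values) yields the base string
# "GET&https%3A%2F%2Fexample.com%2Freq&oauth_nonce%3Dabc", signed with the
# key "<consumer secret>&<token secret>" (token secret empty if no token).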
def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None):
"""Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.
See http://oauth.net/core/1.0a/#signing_process
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + "://" + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = "&".join(_oauth_escape(e) for e in base_elems)
key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))]
key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else ""))
key = b"&".join(key_elems)
hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
def _oauth_escape(val):
if isinstance(val, unicode_type):
val = val.encode("utf-8")
return urllib_parse.quote(val, safe="~")
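# e.g. _oauth_escape(u"a b/c") -> "a%20b%2Fc": passing safe="~" overrides
# quote()'s default of leaving "/" unescaped, as OAuth percent-encoding
# requires.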
def _oauth_parse_response(body):
# I can't find an officially-defined encoding for oauth responses and
# have never seen anyone use non-ascii. Leave the response in a byte
# string for python 2, and use utf8 on python 3.
body = escape.native_str(body)
p = urlparse.parse_qs(body, keep_blank_values=False)
token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0])
# Add the extra parameters the Provider included to the token
special = ("oauth_token", "oauth_token_secret")
token.update((k, p[k][0]) for k in p if k not in special)
return token
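# e.g. the body "oauth_token=abc&oauth_token_secret=def&user_id=42"
# (placeholder values) parses to
# {"key": "abc", "secret": "def", "user_id": "42"}.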
| gpl-2.0 |
planetserver/webclient | js/proj4js/tools/mergejs.py | 161 | 7973 | #!/usr/bin/env python
#
# Merge multiple JavaScript source code files into one.
#
# Usage:
# This script requires source files to have dependencies specified in them.
#
# Dependencies are specified with a comment of the form:
#
# // @requires <file path>
#
# e.g.
#
# // @requires Geo/DataSource.js
#
# or (ideally) within a class comment definition
#
# /**
# * @class
# *
# * @requires OpenLayers/Layer.js
# */
#
# This script should be executed like so:
#
# mergejs.py <output.js> <directory> [...]
#
# e.g.
#
# mergejs.py openlayers.js Geo/ CrossBrowser/
#
# This example will cause the script to walk the `Geo` and
# `CrossBrowser` directories--and subdirectories thereof--and import
# all `*.js` files encountered. The dependency declarations will be extracted
# and then the source code from imported files will be output to
# a file named `openlayers.js` in an order which fulfils the dependencies
# specified.
#
#
# Note: This is a very rough initial version of this code.
#
# -- Copyright 2005-2007 MetaCarta, Inc. / OpenLayers project --
#
# TODO: Allow files to be excluded. e.g. `Crossbrowser/DebugMode.js`?
# TODO: Report error when dependency can not be found rather than KeyError.
import re
import os
import sys
import glob
SUFFIX_JAVASCRIPT = ".js"
RE_REQUIRE = "@requires (.*)\n" # TODO: Ensure in comment?
class SourceFile:
"""
Represents a Javascript source code file.
"""
def __init__(self, filepath, source):
"""
"""
self.filepath = filepath
self.source = source
self.requiredBy = []
def _getRequirements(self):
"""
Extracts the dependencies specified in the source code and returns
a list of them.
"""
# TODO: Cache?
return re.findall(RE_REQUIRE, self.source)
requires = property(fget=_getRequirements, doc="")
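    # For example, a source line "// @requires Geo/DataSource.js\n" yields
    # "Geo/DataSource.js"; RE_REQUIRE captures everything after "@requires "
    # up to the newline, so trailing text on the same line would be captured
    # too (see the TODO next to RE_REQUIRE).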
def usage(filename):
"""
Displays a usage message.
"""
print "%s [-c <config file>] <output.js> <directory> [...]" % filename
class Config:
"""
Represents a parsed configuration file.
A configuration file should be of the following form:
[first]
3rd/prototype.js
core/application.js
core/params.js
[last]
core/api.js
    [include]
    [exclude]
    3rd/logger.js
    All four headings are required.
    The files listed in the `first` section will be forced to load
    *before* all other files (in the order listed). The files in the `last`
    section will be forced to load *after* all the other files (in the
    order listed).
    The `include` section may be left empty; if it is non-empty, only files
    matching one of its patterns (plus any `first` files) will be imported
    and the `exclude` section is ignored.
    The files listed in the `exclude` section will not be imported.
"""
def __init__(self, filename):
"""
Parses the content of the named file and stores the values.
"""
lines = [line.strip() # Assumes end-of-line character is present
for line in open(filename)
if line.strip()] # Skip blank lines
self.forceFirst = lines[lines.index("[first]") + 1:lines.index("[last]")]
self.forceLast = lines[lines.index("[last]") + 1:lines.index("[include]")]
self.include = lines[lines.index("[include]") + 1:lines.index("[exclude]")]
self.exclude = lines[lines.index("[exclude]") + 1:]
def run (sourceDirectory, outputFilename = None, configFile = None):
cfg = None
if configFile:
cfg = Config(configFile)
allFiles = []
## Find all the Javascript source files
for root, dirs, files in os.walk(sourceDirectory):
for filename in files:
if filename.endswith(SUFFIX_JAVASCRIPT) and not filename.startswith("."):
filepath = os.path.join(root, filename)[len(sourceDirectory)+1:]
filepath = filepath.replace("\\", "/")
if cfg and cfg.include:
include = False
for included in cfg.include:
if glob.fnmatch.fnmatch(filepath, included):
include = True
if include or filepath in cfg.forceFirst:
allFiles.append(filepath)
elif (not cfg) or (filepath not in cfg.exclude):
exclude = False
for excluded in cfg.exclude:
if glob.fnmatch.fnmatch(filepath, excluded):
exclude = True
if not exclude:
allFiles.append(filepath)
## Header inserted at the start of each file in the output
HEADER = "/* " + "=" * 70 + "\n %s\n" + " " + "=" * 70 + " */\n\n"
files = {}
order = [] # List of filepaths to output, in a dependency satisfying order
## Import file source code
## TODO: Do import when we walk the directories above?
for filepath in allFiles:
print "Importing: %s" % filepath
fullpath = os.path.join(sourceDirectory, filepath)
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
print
from toposort import toposort
complete = False
resolution_pass = 1
while not complete:
order = [] # List of filepaths to output, in a dependency satisfying order
nodes = []
routes = []
## Resolve the dependencies
print "Resolution pass %s... " % resolution_pass
resolution_pass += 1
for filepath, info in files.items():
nodes.append(filepath)
for neededFilePath in info.requires:
routes.append((neededFilePath, filepath))
for dependencyLevel in toposort(nodes, routes):
for filepath in dependencyLevel:
order.append(filepath)
if not files.has_key(filepath):
print "Importing: %s" % filepath
fullpath = os.path.join(sourceDirectory, filepath)
content = open(fullpath, "U").read() # TODO: Ensure end of line @ EOF?
files[filepath] = SourceFile(filepath, content) # TODO: Chop path?
# Double check all dependencies have been met
complete = True
try:
for fp in order:
if max([order.index(rfp) for rfp in files[fp].requires] +
[order.index(fp)]) != order.index(fp):
complete = False
except:
complete = False
print
## Move forced first and last files to the required position
if cfg:
print "Re-ordering files..."
order = cfg.forceFirst + [item
for item in order
if ((item not in cfg.forceFirst) and
(item not in cfg.forceLast))] + cfg.forceLast
print
## Output the files in the determined order
result = []
for fp in order:
f = files[fp]
print "Exporting: ", f.filepath
result.append(HEADER % f.filepath)
source = f.source
result.append(source)
if not source.endswith("\n"):
result.append("\n")
print "\nTotal files merged: %d " % len(files)
if outputFilename:
print "\nGenerating: %s" % (outputFilename)
open(outputFilename, "w").write("".join(result))
return "".join(result)
if __name__ == "__main__":
import getopt
options, args = getopt.getopt(sys.argv[1:], "-c:")
    try:
        outputFilename = args[0]
        sourceDirectory = args[1]
    except IndexError:
        usage(sys.argv[0])
        raise SystemExit
    else:
        if not sourceDirectory:
            usage(sys.argv[0])
            raise SystemExit
configFile = None
if options and options[0][0] == "-c":
configFile = options[0][1]
print "Parsing configuration file: %s" % filename
run( sourceDirectory, outputFilename, configFile )
| gpl-3.0 |
Mazecreator/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 79 | 2464 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.generator_io import generator_input_fn
| apache-2.0 |
themass/zulip | api/zulip/__init__.py | 115 | 17705 | # -*- coding: utf-8 -*-
# Copyright © 2012-2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import simplejson
import requests
import time
import traceback
import urlparse
import sys
import os
import optparse
import platform
import urllib
import random
from distutils.version import LooseVersion
from ConfigParser import SafeConfigParser
import logging
__version__ = "0.2.4"
logger = logging.getLogger(__name__)
# Check that we have a recent enough version
# Older versions don't provide the 'json' attribute on responses.
assert(LooseVersion(requests.__version__) >= LooseVersion('0.12.1'))
# In newer versions, the 'json' attribute is a function, not a property
requests_json_is_function = callable(requests.Response.json)
API_VERSTRING = "v1/"
class CountingBackoff(object):
def __init__(self, maximum_retries=10, timeout_success_equivalent=None):
self.number_of_retries = 0
self.maximum_retries = maximum_retries
self.timeout_success_equivalent = timeout_success_equivalent
self.last_attempt_time = 0
def keep_going(self):
self._check_success_timeout()
return self.number_of_retries < self.maximum_retries
def succeed(self):
self.number_of_retries = 0
self.last_attempt_time = time.time()
def fail(self):
self._check_success_timeout()
self.number_of_retries = min(self.number_of_retries + 1,
self.maximum_retries)
self.last_attempt_time = time.time()
def _check_success_timeout(self):
if (self.timeout_success_equivalent is not None
and self.last_attempt_time != 0
and time.time() - self.last_attempt_time > self.timeout_success_equivalent):
self.number_of_retries = 0
class RandomExponentialBackoff(CountingBackoff):
def fail(self):
super(RandomExponentialBackoff, self).fail()
# Exponential growth with ratio sqrt(2); compute random delay
# between x and 2x where x is growing exponentially
delay_scale = int(2 ** (self.number_of_retries / 2.0 - 1)) + 1
delay = delay_scale + random.randint(1, delay_scale)
message = "Sleeping for %ss [max %s] before retrying." % (delay, delay_scale * 2)
try:
logger.warning(message)
except NameError:
print message
time.sleep(delay)
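# Rough illustration of the delays produced by fail() above (the exact
# value also depends on random.randint):
#   retry 1: delay_scale = 1 -> delay == 2
#   retry 2: delay_scale = 2 -> delay in [3, 4]
#   retry 4: delay_scale = 3 -> delay in [4, 6]
#   retry 6: delay_scale = 5 -> delay in [6, 10]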
def _default_client():
return "ZulipPython/" + __version__
def generate_option_group(parser, prefix=''):
group = optparse.OptionGroup(parser, 'Zulip API configuration')
group.add_option('--%ssite' % (prefix,),
dest="zulip_site",
help="Zulip server URI",
default=None)
group.add_option('--%sapi-key' % (prefix,),
dest="zulip_api_key",
action='store')
group.add_option('--%suser' % (prefix,),
dest='zulip_email',
help='Email address of the calling bot or user.')
group.add_option('--%sconfig-file' % (prefix,),
action='store',
dest="zulip_config_file",
help='Location of an ini file containing the\nabove information. (default ~/.zuliprc)')
group.add_option('-v', '--verbose',
action='store_true',
help='Provide detailed output.')
group.add_option('--%sclient' % (prefix,),
action='store',
default=None,
dest="zulip_client",
help=optparse.SUPPRESS_HELP)
return group
def init_from_options(options, client=None):
if options.zulip_client is not None:
client = options.zulip_client
elif client is None:
client = _default_client()
return Client(email=options.zulip_email, api_key=options.zulip_api_key,
config_file=options.zulip_config_file, verbose=options.verbose,
site=options.zulip_site, client=client)
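# Typical calling pattern for the two helpers above (a sketch; the parser
# setup is illustrative, not part of this module's API):
#
#   parser = optparse.OptionParser()
#   parser.add_option_group(generate_option_group(parser))
#   (options, args) = parser.parse_args()
#   client = init_from_options(options)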
def get_default_config_filename():
config_file = os.path.join(os.environ["HOME"], ".zuliprc")
if (not os.path.exists(config_file) and
os.path.exists(os.path.join(os.environ["HOME"], ".humbugrc"))):
raise RuntimeError("The Zulip API configuration file is now ~/.zuliprc; please run:\n\n mv ~/.humbugrc ~/.zuliprc\n")
return config_file
class Client(object):
def __init__(self, email=None, api_key=None, config_file=None,
verbose=False, retry_on_errors=True,
site=None, client=None):
if client is None:
client = _default_client()
if None in (api_key, email):
if config_file is None:
config_file = get_default_config_filename()
if not os.path.exists(config_file):
raise RuntimeError("api_key or email not specified and %s does not exist"
% (config_file,))
config = SafeConfigParser()
with file(config_file, 'r') as f:
config.readfp(f, config_file)
if api_key is None:
api_key = config.get("api", "key")
if email is None:
email = config.get("api", "email")
if site is None and config.has_option("api", "site"):
site = config.get("api", "site")
self.api_key = api_key
self.email = email
self.verbose = verbose
if site is not None:
if not site.startswith("http"):
site = "https://" + site
# Remove trailing "/"s from site to simplify the below logic for adding "/api"
site = site.rstrip("/")
self.base_url = site
else:
self.base_url = "https://api.zulip.com"
if self.base_url != "https://api.zulip.com" and not self.base_url.endswith("/api"):
self.base_url += "/api"
self.base_url += "/"
self.retry_on_errors = retry_on_errors
self.client_name = client
def get_user_agent(self):
vendor = ''
vendor_version = ''
try:
vendor = platform.system()
vendor_version = platform.release()
except IOError:
# If the calling process is handling SIGCHLD, platform.system() can
# fail with an IOError. See http://bugs.python.org/issue9127
pass
if vendor == "Linux":
vendor, vendor_version, dummy = platform.linux_distribution()
elif vendor == "Windows":
vendor_version = platform.win32_ver()[1]
elif vendor == "Darwin":
vendor_version = platform.mac_ver()[0]
return "{client_name} ({vendor}; {vendor_version})".format(
client_name=self.client_name,
vendor=vendor,
vendor_version=vendor_version,
)
def do_api_query(self, orig_request, url, method="POST", longpolling = False):
request = {}
for (key, val) in orig_request.iteritems():
if not (isinstance(val, str) or isinstance(val, unicode)):
request[key] = simplejson.dumps(val)
else:
request[key] = val
query_state = {
'had_error_retry': False,
'request': request,
'failures': 0,
}
def error_retry(error_string):
if not self.retry_on_errors or query_state["failures"] >= 10:
return False
if self.verbose:
if not query_state["had_error_retry"]:
sys.stdout.write("zulip API(%s): connection error%s -- retrying." % \
(url.split(API_VERSTRING, 2)[0], error_string,))
query_state["had_error_retry"] = True
else:
sys.stdout.write(".")
sys.stdout.flush()
query_state["request"]["dont_block"] = simplejson.dumps(True)
time.sleep(1)
query_state["failures"] += 1
return True
def end_error_retry(succeeded):
if query_state["had_error_retry"] and self.verbose:
if succeeded:
print "Success!"
else:
print "Failed!"
while True:
try:
if method == "GET":
kwarg = "params"
else:
kwarg = "data"
kwargs = {kwarg: query_state["request"]}
res = requests.request(
method,
urlparse.urljoin(self.base_url, url),
auth=requests.auth.HTTPBasicAuth(self.email,
self.api_key),
verify=True, timeout=90,
headers={"User-agent": self.get_user_agent()},
**kwargs)
# On 50x errors, try again after a short sleep
if str(res.status_code).startswith('5'):
if error_retry(" (server %s)" % (res.status_code,)):
continue
# Otherwise fall through and process the python-requests error normally
except (requests.exceptions.Timeout, requests.exceptions.SSLError) as e:
                # Timeouts are raised as either a Timeout or an SSLError;
                # we want the later exception handlers to deal with any
                # other, non-timeout SSLErrors
if (isinstance(e, requests.exceptions.SSLError) and
str(e) != "The read operation timed out"):
raise
if longpolling:
# When longpolling, we expect the timeout to fire,
# and the correct response is to just retry
continue
else:
end_error_retry(False)
return {'msg': "Connection error:\n%s" % traceback.format_exc(),
"result": "connection-error"}
except requests.exceptions.ConnectionError:
if error_retry(""):
continue
end_error_retry(False)
return {'msg': "Connection error:\n%s" % traceback.format_exc(),
"result": "connection-error"}
except Exception:
# We'll split this out into more cases as we encounter new bugs.
return {'msg': "Unexpected error:\n%s" % traceback.format_exc(),
"result": "unexpected-error"}
try:
if requests_json_is_function:
json_result = res.json()
else:
json_result = res.json
except Exception:
json_result = None
if json_result is not None:
end_error_retry(True)
return json_result
end_error_retry(False)
return {'msg': "Unexpected error from the server", "result": "http-error",
"status_code": res.status_code}
@classmethod
def _register(cls, name, url=None, make_request=(lambda request={}: request),
method="POST", computed_url=None, **query_kwargs):
if url is None:
url = name
def call(self, *args, **kwargs):
request = make_request(*args, **kwargs)
if computed_url is not None:
req_url = computed_url(request)
else:
req_url = url
return self.do_api_query(request, API_VERSTRING + req_url, method=method, **query_kwargs)
call.func_name = name
setattr(cls, name, call)
def call_on_each_event(self, callback, event_types=None, narrow=[]):
def do_register():
while True:
if event_types is None:
res = self.register()
else:
res = self.register(event_types=event_types, narrow=narrow)
if 'error' in res.get('result'):
if self.verbose:
print "Server returned error:\n%s" % res['msg']
time.sleep(1)
else:
return (res['queue_id'], res['last_event_id'])
queue_id = None
while True:
if queue_id is None:
(queue_id, last_event_id) = do_register()
res = self.get_events(queue_id=queue_id, last_event_id=last_event_id)
if 'error' in res.get('result'):
if res["result"] == "http-error":
if self.verbose:
print "HTTP error fetching events -- probably a server restart"
elif res["result"] == "connection-error":
if self.verbose:
print "Connection error fetching events -- probably server is temporarily down?"
else:
if self.verbose:
print "Server returned error:\n%s" % res["msg"]
if res["msg"].startswith("Bad event queue id:"):
# Our event queue went away, probably because
# we were asleep or the server restarted
# abnormally. We may have missed some
# events while the network was down or
# something, but there's not really anything
# we can do about it other than resuming
# getting new ones.
#
# Reset queue_id to register a new event queue.
queue_id = None
# TODO: Make this back off once it's more reliable
time.sleep(1)
continue
for event in res['events']:
last_event_id = max(last_event_id, int(event['id']))
callback(event)
def call_on_each_message(self, callback):
def event_callback(event):
if event['type'] == 'message':
callback(event['message'])
self.call_on_each_event(event_callback, ['message'])
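# Minimal usage sketch for the callback API above (assumes credentials in
# ~/.zuliprc; the handler below is hypothetical):
#
#   client = Client()
#   def print_content(message):
#       print message["content"]
#   client.call_on_each_message(print_content)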
def _mk_subs(streams, **kwargs):
result = kwargs
result['subscriptions'] = streams
return result
def _mk_rm_subs(streams):
return {'delete': streams}
def _mk_deregister(queue_id):
return {'queue_id': queue_id}
def _mk_events(event_types=None, narrow=[]):
if event_types is None:
return dict()
return dict(event_types=event_types, narrow=narrow)
def _kwargs_to_dict(**kwargs):
return kwargs
class ZulipStream(object):
"""
A Zulip stream-like object
"""
def __init__(self, type, to, subject, **kwargs):
self.client = Client(**kwargs)
self.type = type
self.to = to
self.subject = subject
def write(self, content):
message = {"type": self.type,
"to": self.to,
"subject": self.subject,
"content": content}
self.client.send_message(message)
def flush(self):
pass
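# Example use of ZulipStream (a sketch; the stream, subject and credentials
# are hypothetical): redirecting stdout sends each write as a message.
#
#   sys.stdout = ZulipStream("stream", "denmark", "castle",
#                            email="[email protected]", api_key="...")
#   print "To be or not to be"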
Client._register('send_message', url='messages', make_request=(lambda request: request))
Client._register('update_message', method='PATCH', url='messages', make_request=(lambda request: request))
Client._register('get_messages', method='GET', url='messages/latest', longpolling=True)
Client._register('get_events', url='events', method='GET', longpolling=True, make_request=(lambda **kwargs: kwargs))
Client._register('register', make_request=_mk_events)
Client._register('export', method='GET', url='export')
Client._register('deregister', url="events", method="DELETE", make_request=_mk_deregister)
Client._register('get_profile', method='GET', url='users/me')
Client._register('get_streams', method='GET', url='streams', make_request=_kwargs_to_dict)
Client._register('get_members', method='GET', url='users')
Client._register('list_subscriptions', method='GET', url='users/me/subscriptions')
Client._register('add_subscriptions', url='users/me/subscriptions', make_request=_mk_subs)
Client._register('remove_subscriptions', method='PATCH', url='users/me/subscriptions', make_request=_mk_rm_subs)
Client._register('get_subscribers', method='GET',
computed_url=lambda request: 'streams/%s/members' % (urllib.quote(request['stream'], safe=''),),
make_request=_kwargs_to_dict)
Client._register('render_message', method='GET', url='messages/render')
Client._register('create_user', method='POST', url='users')
| apache-2.0 |
jmhodges/letsencrypt | letsencrypt-nginx/letsencrypt_nginx/parser.py | 26 | 16596 | """NginxParser is a member object of the NginxConfigurator class."""
import glob
import logging
import os
import pyparsing
import re
from letsencrypt import errors
from letsencrypt_nginx import obj
from letsencrypt_nginx import nginxparser
logger = logging.getLogger(__name__)
class NginxParser(object):
"""Class handles the fine details of parsing the Nginx Configuration.
    :ivar str root: Normalized absolute path to the server root
directory. Without trailing slash.
:ivar dict parsed: Mapping of file paths to parsed trees
"""
def __init__(self, root, ssl_options):
self.parsed = {}
self.root = os.path.abspath(root)
self.loc = self._set_locations(ssl_options)
# Parse nginx.conf and included files.
# TODO: Check sites-available/ as well. For now, the configurator does
# not enable sites from there.
self.load()
def load(self):
"""Loads Nginx files into a parsed tree.
"""
self.parsed = {}
self._parse_recursively(self.loc["root"])
def _parse_recursively(self, filepath):
"""Parses nginx config files recursively by looking at 'include'
directives inside 'http' and 'server' blocks. Note that this only
reads Nginx files that potentially declare a virtual host.
:param str filepath: The path to the files to parse, as a glob
"""
filepath = self.abs_path(filepath)
trees = self._parse_files(filepath)
for tree in trees:
for entry in tree:
if _is_include_directive(entry):
# Parse the top-level included file
self._parse_recursively(entry[1])
elif entry[0] == ['http'] or entry[0] == ['server']:
# Look for includes in the top-level 'http'/'server' context
for subentry in entry[1]:
if _is_include_directive(subentry):
self._parse_recursively(subentry[1])
elif entry[0] == ['http'] and subentry[0] == ['server']:
# Look for includes in a 'server' context within
# an 'http' context
for server_entry in subentry[1]:
if _is_include_directive(server_entry):
self._parse_recursively(server_entry[1])
def abs_path(self, path):
"""Converts a relative path to an absolute path relative to the root.
Does nothing for paths that are already absolute.
:param str path: The path
:returns: The absolute path
:rtype: str
"""
if not os.path.isabs(path):
return os.path.join(self.root, path)
else:
return path
def get_vhosts(self):
# pylint: disable=cell-var-from-loop
"""Gets list of all 'virtual hosts' found in Nginx configuration.
Technically this is a misnomer because Nginx does not have virtual
hosts, it has 'server blocks'.
:returns: List of :class:`~letsencrypt_nginx.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
enabled = True # We only look at enabled vhosts for now
vhosts = []
servers = {}
for filename in self.parsed:
tree = self.parsed[filename]
servers[filename] = []
srv = servers[filename] # workaround undefined loop var in lambdas
# Find all the server blocks
_do_for_subarray(tree, lambda x: x[0] == ['server'],
lambda x: srv.append(x[1]))
# Find 'include' statements in server blocks and append their trees
for i, server in enumerate(servers[filename]):
new_server = self._get_included_directives(server)
servers[filename][i] = new_server
for filename in servers:
for server in servers[filename]:
# Parse the server block into a VirtualHost object
parsed_server = _parse_server(server)
vhost = obj.VirtualHost(filename,
parsed_server['addrs'],
parsed_server['ssl'],
enabled,
parsed_server['names'],
server)
vhosts.append(vhost)
return vhosts
def _get_included_directives(self, block):
"""Returns array with the "include" directives expanded out by
concatenating the contents of the included file to the block.
:param list block:
:rtype: list
"""
result = list(block) # Copy the list to keep self.parsed idempotent
for directive in block:
if _is_include_directive(directive):
included_files = glob.glob(
self.abs_path(directive[1]))
for incl in included_files:
try:
result.extend(self.parsed[incl])
except KeyError:
pass
return result
def _parse_files(self, filepath, override=False):
"""Parse files from a glob
:param str filepath: Nginx config file path
:param bool override: Whether to parse a file that has been parsed
:returns: list of parsed tree structures
:rtype: list
"""
files = glob.glob(filepath)
trees = []
for item in files:
if item in self.parsed and not override:
continue
try:
with open(item) as _file:
parsed = nginxparser.load(_file)
self.parsed[item] = parsed
trees.append(parsed)
except IOError:
logger.warn("Could not open file: %s", item)
except pyparsing.ParseException:
logger.debug("Could not parse file: %s", item)
return trees
def _set_locations(self, ssl_options):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
root = self._find_config_root()
default = root
nginx_temp = os.path.join(self.root, "nginx_ports.conf")
if os.path.isfile(nginx_temp):
listen = nginx_temp
name = nginx_temp
else:
listen = default
name = default
return {"root": root, "default": default, "listen": listen,
"name": name, "ssl_options": ssl_options}
def _find_config_root(self):
"""Find the Nginx Configuration Root file."""
location = ['nginx.conf']
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError(
"Could not find configuration root")
def filedump(self, ext='tmp'):
"""Dumps parsed configurations into files.
:param str ext: The file extension to use for the dumped files. If
empty, this overrides the existing conf files.
"""
for filename in self.parsed:
tree = self.parsed[filename]
if ext:
filename = filename + os.path.extsep + ext
try:
with open(filename, 'w') as _file:
nginxparser.dump(tree, _file)
except IOError:
logger.error("Could not open file for writing: %s", filename)
def _has_server_names(self, entry, names):
"""Checks if a server block has the given set of server_names. This
is the primary way of identifying server blocks in the configurator.
Returns false if 'entry' doesn't look like a server block at all.
        .. todo:: Doesn't match server blocks whose server_name directives are
split across multiple conf files.
:param list entry: The block to search
:param set names: The names to match
:rtype: bool
"""
if len(names) == 0:
# Nothing to identify blocks with
return False
if not isinstance(entry, list):
# Can't be a server block
return False
new_entry = self._get_included_directives(entry)
server_names = set()
for item in new_entry:
if not isinstance(item, list):
# Can't be a server block
return False
if item[0] == 'server_name':
server_names.update(_get_servernames(item[1]))
return server_names == names
def add_server_directives(self, filename, names, directives,
replace=False):
"""Add or replace directives in the first server block with names.
        .. note:: If replace is True, this raises a misconfiguration error
if the directive does not already exist.
        .. todo:: Doesn't match server blocks whose server_name directives are
split across multiple conf files.
:param str filename: The absolute filename of the config file
:param set names: The server_name to match
:param list directives: The directives to add
:param bool replace: Whether to only replace existing directives
"""
_do_for_subarray(self.parsed[filename],
lambda x: self._has_server_names(x, names),
lambda x: _add_directives(x, directives, replace))
def add_http_directives(self, filename, directives):
"""Adds directives to the first encountered HTTP block in filename.
:param str filename: The absolute filename of the config file
:param list directives: The directives to add
"""
_do_for_subarray(self.parsed[filename],
lambda x: x[0] == ['http'],
lambda x: _add_directives(x[1], [directives], False))
def get_all_certs_keys(self):
"""Gets all certs and keys in the nginx config.
:returns: list of tuples with form [(cert, key, path)]
cert - str path to certificate file
key - str path to associated key file
path - File path to configuration file.
:rtype: set
"""
c_k = set()
vhosts = self.get_vhosts()
for vhost in vhosts:
tup = [None, None, vhost.filep]
if vhost.ssl:
for directive in vhost.raw:
if directive[0] == 'ssl_certificate':
tup[0] = directive[1]
elif directive[0] == 'ssl_certificate_key':
tup[1] = directive[1]
if tup[0] is not None and tup[1] is not None:
c_k.add(tuple(tup))
return c_k
def _do_for_subarray(entry, condition, func):
"""Executes a function for a subarray of a nested array if it matches
the given condition.
:param list entry: The list to iterate over
:param function condition: Returns true iff func should be executed on item
:param function func: The function to call for each matching item
"""
if isinstance(entry, list):
if condition(entry):
func(entry)
else:
for item in entry:
_do_for_subarray(item, condition, func)
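# For example, get_vhosts() uses the helper above to collect every 'server'
# block in a parsed tree; an equivalent standalone sketch:
#
#   servers = []
#   _do_for_subarray(tree, lambda x: x[0] == ['server'],
#                    lambda x: servers.append(x[1]))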
def get_best_match(target_name, names):
"""Finds the best match for target_name out of names using the Nginx
name-matching rules (exact > longest wildcard starting with * >
longest wildcard ending with * > regex).
:param str target_name: The name to match
:param set names: The candidate server names
:returns: Tuple of (type of match, the name that matched)
:rtype: tuple
"""
exact = []
wildcard_start = []
wildcard_end = []
regex = []
for name in names:
if _exact_match(target_name, name):
exact.append(name)
elif _wildcard_match(target_name, name, True):
wildcard_start.append(name)
elif _wildcard_match(target_name, name, False):
wildcard_end.append(name)
elif _regex_match(target_name, name):
regex.append(name)
if len(exact) > 0:
# There can be more than one exact match; e.g. eff.org, .eff.org
match = min(exact, key=len)
return ('exact', match)
if len(wildcard_start) > 0:
# Return the longest wildcard
match = max(wildcard_start, key=len)
return ('wildcard_start', match)
if len(wildcard_end) > 0:
# Return the longest wildcard
match = max(wildcard_end, key=len)
return ('wildcard_end', match)
if len(regex) > 0:
# Just return the first one for now
match = regex[0]
return ('regex', match)
return (None, None)
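# Illustration of the precedence implemented above (hypothetical names):
#
#   get_best_match('www.eff.org',
#                  set(['www.eff.org', '*.eff.org', '~^www\.']))
#   # -> ('exact', 'www.eff.org') -- exact beats wildcard and regex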
def _exact_match(target_name, name):
return target_name == name or '.' + target_name == name
def _wildcard_match(target_name, name, start):
# Degenerate case
if name == '*':
return True
parts = target_name.split('.')
match_parts = name.split('.')
# If the domain ends in a wildcard, do the match procedure in reverse
if not start:
parts.reverse()
match_parts.reverse()
# The first part must be a wildcard or blank, e.g. '.eff.org'
first = match_parts.pop(0)
if first != '*' and first != '':
return False
target_name = '.'.join(parts)
name = '.'.join(match_parts)
# Ex: www.eff.org matches *.eff.org, eff.org does not match *.eff.org
return target_name.endswith('.' + name)
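# Examples of the wildcard semantics implemented above:
#   _wildcard_match('www.eff.org', '*.eff.org', True)   # True
#   _wildcard_match('eff.org', '*.eff.org', True)       # False
#   _wildcard_match('www.eff.org', 'www.eff.*', False)  # True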
def _regex_match(target_name, name):
# Must start with a tilde
if len(name) < 2 or name[0] != '~':
return False
# After tilde is a perl-compatible regex
try:
regex = re.compile(name[1:])
if re.match(regex, target_name):
return True
else:
return False
except re.error:
# perl-compatible regexes are sometimes not recognized by python
return False
def _is_include_directive(entry):
"""Checks if an nginx parsed entry is an 'include' directive.
:param list entry: the parsed entry
:returns: Whether it's an 'include' directive
:rtype: bool
"""
return (isinstance(entry, list) and
entry[0] == 'include' and len(entry) == 2 and
isinstance(entry[1], str))
def _get_servernames(names):
"""Turns a server_name string into a list of server names
:param str names: server names
:rtype: list
"""
whitespace_re = re.compile(r'\s+')
names = re.sub(whitespace_re, ' ', names)
return names.split(' ')
def _parse_server(server):
"""Parses a list of server directives.
:param list server: list of directives in a server block
:rtype: dict
"""
parsed_server = {}
parsed_server['addrs'] = set()
parsed_server['ssl'] = False
parsed_server['names'] = set()
for directive in server:
if directive[0] == 'listen':
addr = obj.Addr.fromstring(directive[1])
parsed_server['addrs'].add(addr)
if not parsed_server['ssl'] and addr.ssl:
parsed_server['ssl'] = True
elif directive[0] == 'server_name':
parsed_server['names'].update(
_get_servernames(directive[1]))
return parsed_server
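# For example (a sketch; the Addr repr below is only indicative), a server
# block such as:
#   [['listen', '443 ssl'], ['server_name', 'a.example.com b.example.com']]
# parses to:
#   {'addrs': set([<Addr: 443 ssl>]), 'ssl': True,
#    'names': set(['a.example.com', 'b.example.com'])}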
def _add_directives(block, directives, replace=False):
"""Adds or replaces directives in a block. If the directive doesn't exist in
the entry already, raises a misconfiguration error.
..todo :: Find directives that are in included files.
:param list block: The block to replace in
:param list directives: The new directives.
"""
if replace:
for directive in directives:
changed = False
if len(directive) == 0:
continue
for index, line in enumerate(block):
if len(line) > 0 and line[0] == directive[0]:
block[index] = directive
changed = True
if not changed:
raise errors.MisconfigurationError(
'LetsEncrypt expected directive for %s in the Nginx '
'config but did not find it.' % directive[0])
else:
block.extend(directives)
| apache-2.0 |
redhatrises/freeipa | ipatests/test_webui/ui_driver.py | 2 | 60841 | # Authors:
# Petr Vobornik <[email protected]>
#
# Copyright (C) 2013 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Base class for UI integration tests.
Contains browser driver and common tasks.
"""
from __future__ import print_function
import nose
from datetime import datetime
import time
import re
import os
from functools import wraps
from nose.plugins.skip import SkipTest
# pylint: disable=import-error
from six.moves.urllib.error import URLError
# pylint: enable=import-error
try:
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import InvalidElementStateException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support.ui import Select
NO_SELENIUM = False
except ImportError:
NO_SELENIUM = True
try:
import yaml
NO_YAML = False
except ImportError:
NO_YAML = True
from ipaplatform.paths import paths
ENV_MAP = {
'MASTER': 'ipa_server',
'ADMINID': 'ipa_admin',
'ADMINPW': 'ipa_password',
'DOMAIN': 'ipa_domain',
'IPA_REALM': 'ipa_realm',
'IPA_IP': 'ipa_ip',
'IPA_NO_CA': 'no_ca',
'IPA_NO_DNS': 'no_dns',
'IPA_HAS_TRUSTS': 'has_trusts',
'IPA_HAS_KRA': 'has_kra',
'IPA_HOST_CSR_PATH': 'host_csr_path',
'IPA_SERVICE_CSR_PATH': 'service_csr_path',
'AD_DOMAIN': 'ad_domain',
'AD_DC': 'ad_dc',
'AD_ADMIN': 'ad_admin',
'AD_PASSWORD': 'ad_password',
'AD_DC_IP': 'ad_dc_ip',
'TRUST_SECRET': 'trust_secret',
'SEL_TYPE': 'type',
'SEL_BROWSER': 'browser',
'SEL_HOST': 'host',
'FF_PROFILE': 'ff_profile',
}
DEFAULT_BROWSER = 'firefox'
DEFAULT_PORT = 4444
DEFAULT_TYPE = 'local'
def screenshot(fn):
"""
Decorator for saving screenshot on exception (test fail)
Should be applied on methods of UI_driver subclasses
"""
@wraps(fn)
def screenshot_wrapper(*args):
try:
return fn(*args)
except SkipTest:
raise
except Exception:
self = args[0]
name = '%s_%s_%s' % (
datetime.now().isoformat(),
self.__class__.__name__,
fn.__name__)
self.take_screenshot(name)
raise
return screenshot_wrapper
class UI_driver(object):
"""
Base class for all UI integration tests
"""
@classmethod
def setup_class(cls):
if NO_SELENIUM:
raise nose.SkipTest('Selenium not installed')
def setup(self, driver=None, config=None):
self.request_timeout = 30
self.driver = driver
self.config = config
if not config:
self.load_config()
if not self.driver:
self.driver = self.get_driver()
self.driver.maximize_window()
def teardown(self):
self.driver.quit()
def load_config(self):
"""
Load configuration
1) From ~/.ipa/ui_test.conf
2) From environmental variables
"""
# load config file
path = os.path.join(os.path.expanduser("~"), ".ipa/ui_test.conf")
if not NO_YAML and os.path.isfile(path):
try:
with open(path, 'r') as conf:
self.config = yaml.load(conf)
except yaml.YAMLError as e:
raise nose.SkipTest("Invalid Web UI config.\n%s" % e)
except IOError as e:
raise nose.SkipTest("Can't load Web UI test config: %s" % e)
else:
self.config = {}
c = self.config
# override with environmental variables
for k, v in ENV_MAP.items():
val = os.environ.get(k)
if val is not None:
c[v] = val
# apply defaults
if 'port' not in c:
c['port'] = DEFAULT_PORT
if 'browser' not in c:
c['browser'] = DEFAULT_BROWSER
if 'type' not in c:
c['type'] = DEFAULT_TYPE
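    # A minimal example of ~/.ipa/ui_test.conf in YAML (all values shown
    # here are hypothetical):
    #
    #   ipa_server: ipa.example.com
    #   ipa_admin: admin
    #   ipa_password: Secret123
    #   browser: firefox
    #   type: local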
def get_driver(self):
"""
Get WebDriver according to configuration
"""
browser = self.config["browser"]
port = self.config["port"]
driver_type = self.config["type"]
options = None
if browser == 'chromium':
options = ChromeOptions()
options.binary_location = paths.CHROMIUM_BROWSER
if driver_type == 'remote':
if 'host' not in self.config:
raise nose.SkipTest('Selenium server host not configured')
host = self.config["host"]
if browser == 'chrome':
capabilities = DesiredCapabilities.CHROME
elif browser == 'chromium':
capabilities = options.to_capabilities()
elif browser == 'ie':
capabilities = DesiredCapabilities.INTERNETEXPLORER
else:
capabilities = DesiredCapabilities.FIREFOX
try:
driver = webdriver.Remote(
command_executor='http://%s:%d/wd/hub' % (host, port),
desired_capabilities=capabilities)
except URLError as e:
raise nose.SkipTest('Error connecting to selenium server: %s' % e)
except RuntimeError as e:
raise nose.SkipTest('Error while establishing webdriver: %s' % e)
else:
try:
if browser == 'chrome' or browser == 'chromium':
driver = webdriver.Chrome(chrome_options=options)
elif browser == 'ie':
driver = webdriver.Ie()
else:
fp = None
if "ff_profile" in self.config:
fp = webdriver.FirefoxProfile(self.config["ff_profile"])
driver = webdriver.Firefox(fp)
except URLError as e:
raise nose.SkipTest('Error connecting to selenium server: %s' % e)
except RuntimeError as e:
raise nose.SkipTest('Error while establishing webdriver: %s' % e)
return driver
def find(self, expression, by='id', context=None, many=False, strict=False):
"""
Helper which calls selenium find_element_by_xxx methods.
expression: search expression
by: selenium.webdriver.common.by
context: element to search on. Default: driver
many: all matching elements
strict: error out when element is not found
Returns None instead of raising exception when element is not found.
"""
assert expression, 'expression is missing'
if context is None:
context = self.driver
if not many:
method_name = 'find_element'
else:
method_name = 'find_elements'
try:
func = getattr(context, method_name)
result = func(by, expression)
except NoSuchElementException:
if strict:
raise
else:
result = None
return result
def files_loaded(self):
"""
Test if dependencies were loaded. (Checks if UI has been rendered)
"""
indicator = self.find(".global-activity-indicator", By.CSS_SELECTOR)
return indicator is not None
def has_ca(self):
"""
FreeIPA server was installed with CA.
"""
return not self.config.get('no_ca')
def has_dns(self):
"""
FreeIPA server was installed with DNS.
"""
return not self.config.get('no_dns')
def has_trusts(self):
"""
FreeIPA server was installed with Trusts.
"""
return self.config.get('has_trusts')
def has_kra(self):
"""
FreeIPA server was installed with Kra.
"""
return self.config.get('has_kra')
def has_active_request(self):
"""
Check if there is running AJAX request
"""
global_indicators = self.find(".global-activity-indicator", By.CSS_SELECTOR, many=True)
for el in global_indicators:
try:
if not self.has_class(el, 'closed'):
return True
except StaleElementReferenceException:
# we don't care. Happens when indicator is part of removed dialog.
continue
return False
def wait(self, seconds=0.2):
"""
Wait specific amount of seconds
"""
time.sleep(seconds)
def wait_for_request(self, implicit=0.2, n=1, d=0):
"""
Wait for AJAX request to finish
"""
runner = self
for _i in range(n):
self.wait(implicit)
WebDriverWait(self.driver, self.request_timeout).until_not(lambda d: runner.has_active_request())
self.wait()
self.wait(d)
def xpath_has_val(self, attr, val):
"""
Create xpath expression for matching a presence of item in attribute
value where value is a list of items separated by space.
"""
return "contains(concat(' ',normalize-space(@%s), ' '),' %s ')" % (attr, val)
def init_app(self, login=None, password=None):
"""
Load and login
"""
self.load()
self.wait(0.5)
self.login(login, password)
# metadata + default page
self.wait_for_request(n=5)
def load(self):
"""
Navigate to Web UI first page and wait for loading of all dependencies.
"""
self.driver.get(self.get_base_url())
runner = self
WebDriverWait(self.driver, 10).until(lambda d: runner.files_loaded())
def login(self, login=None, password=None, new_password=None):
"""
Log in if user is not logged in.
"""
self.wait_for_request(n=2)
if not self.logged_in():
if not login:
login = self.config['ipa_admin']
if not password:
password = self.config['ipa_password']
if not new_password:
new_password = password
auth = self.get_login_screen()
login_tb = self.find("//input[@type='text'][@name='username']", 'xpath', auth, strict=True)
psw_tb = self.find("//input[@type='password'][@name='password']", 'xpath', auth, strict=True)
login_tb.send_keys(login)
psw_tb.send_keys(password)
psw_tb.send_keys(Keys.RETURN)
self.wait(0.5)
self.wait_for_request(n=2)
# reset password if needed
newpw_tb = self.find("//input[@type='password'][@name='new_password']", 'xpath', auth)
verify_tb = self.find("//input[@type='password'][@name='verify_password']", 'xpath', auth)
if newpw_tb and newpw_tb.is_displayed():
newpw_tb.send_keys(new_password)
verify_tb.send_keys(new_password)
verify_tb.send_keys(Keys.RETURN)
self.wait(0.5)
self.wait_for_request(n=2)
def logged_in(self):
"""
Check if user is logged in
"""
login_as = self.find('loggedinas', 'class name')
visible_name = len(login_as.text) > 0
logged_in = not self.login_screen_visible() and visible_name
return logged_in
def logout(self):
self.profile_menu_action('logout')
def get_login_screen(self):
"""
Get reference of login screen
"""
return self.find('rcue-login-screen', 'id')
def login_screen_visible(self):
"""
Check if login screen is visible
"""
screen = self.get_login_screen()
return screen and screen.is_displayed()
def take_screenshot(self, name):
if self.config.get('save_screenshots'):
scr_dir = self.config.get('screenshot_dir')
path = name + '.png'
if scr_dir:
path = os.path.join(scr_dir, path)
self.driver.get_screenshot_as_file(path)
def navigate_to_entity(self, entity, facet=None):
self.driver.get(self.get_url(entity, facet))
self.wait_for_request(n=3, d=0.4)
def navigate_by_menu(self, item, complete=True):
"""
Navigate by using menu
"""
if complete:
parts = item.split('/')
if len(parts) > 1:
parent = parts[0:-1]
self.navigate_by_menu('/'.join(parent), complete)
s = ".navbar a[href='#%s']" % item
link = self.find(s, By.CSS_SELECTOR, strict=True)
assert link.is_displayed(), 'Navigation link is not displayed: %s' % item
link.click()
self.wait_for_request()
self.wait_for_request(0.4)
def navigate_by_breadcrumb(self, item):
"""
Navigate by breadcrumb navigation
"""
facet = self.get_facet()
nav = self.find('.breadcrumb', By.CSS_SELECTOR, facet, strict=True)
a = self.find(item, By.LINK_TEXT, nav, strict=True)
a.click()
self.wait_for_request()
self.wait_for_request(0.4)
def switch_to_facet(self, name):
"""
Click on tab with given name
"""
facet = self.get_facet()
tabs = "div.facet-tabs"
sidebar = "div.sidebar-pf"
facets_container = self.find(tabs, By.CSS_SELECTOR, facet)
# handle sidebar instead of facet-tabs
# the webui facet can have only the facet-tabs OR sidebar, not both
if not facets_container:
facets_container = self.find(sidebar, By.CSS_SELECTOR, facet)
s = "li[name='%s'] a" % name
link = self.find(s, By.CSS_SELECTOR, facets_container, strict=True)
link.click()
# double wait because of facet's paging
self.wait_for_request(0.5)
self.wait_for_request()
def get_url(self, entity, facet=None):
"""
Create entity url
"""
url = [self.get_base_url(), '#', 'e', entity]
if facet:
url.append(facet)
return '/'.join(url)
def get_base_url(self):
"""
Get FreeIPA Web UI url
"""
host = self.config.get('ipa_server')
if not host:
self.skip('FreeIPA server hostname not configured')
return 'https://%s/ipa/ui' % host
def get_facet(self):
"""
Get currently displayed facet
"""
facet = self.find('.active-facet', By.CSS_SELECTOR)
assert facet is not None, "Current facet not found"
return facet
def get_facet_info(self, facet=None):
"""
Get information of currently displayed facet
"""
info = {}
# get facet
if facet is None:
facet = self.get_facet()
info["element"] = facet
#get facet name and entity
info["name"] = facet.get_attribute('data-name')
info["entity"] = facet.get_attribute('data-entity')
# get facet title
el = self.find(".facet-header h3 *:first-child", By.CSS_SELECTOR, facet)
if el:
info["title"] = el.text
# get facet pkey
el = self.find(".facet-header h3 span.facet-pkey", By.CSS_SELECTOR, facet)
if el:
info["pkey"] = el.text
return info
def get_dialogs(self, strict=False, name=None):
"""
Get all dialogs in DOM
"""
s = '.modal-dialog'
if name:
s += "[data-name='%s']" % name
dialogs = self.find(s, By.CSS_SELECTOR, many=True)
if strict:
assert dialogs, "No dialogs found"
return dialogs
def get_dialog(self, strict=False, name=None):
"""
Get last opened dialog
"""
dialogs = self.get_dialogs(strict, name)
dialog = None
if len(dialogs):
dialog = dialogs[-1]
return dialog
def get_last_error_dialog(self, dialog_name='error_dialog'):
"""
Get last opened error dialog or None.
"""
s = ".modal-dialog[data-name='%s']" % dialog_name
dialogs = self.find(s, By.CSS_SELECTOR, many=True)
dialog = None
if dialogs:
dialog = dialogs[-1]
return dialog
def get_dialog_info(self):
"""
Get last open dialog info: name, text if any.
Returns None if no dialog is open.
"""
dialog = self.get_dialog()
info = None
if dialog:
body = self.find('.modal-body', By.CSS_SELECTOR, dialog, strict=True)
info = {
'name': dialog.get_attribute('data-name'),
'text': body.text,
}
return info
def execute_api_from_ui(self, method, args, options, timeout=30):
"""
Executes FreeIPA API command/method from Web UI
"""
script = """
var method = arguments[0];
var args = arguments[1];
var options = arguments[2];
var callback = arguments[arguments.length - 1];
var rpc = require('freeipa/rpc');
var cmd = rpc.command({
method: method,
args: args,
options: options,
on_success: callback,
on_error: callback
});
cmd.execute();
"""
self.driver.set_script_timeout(timeout)
result = self.driver.execute_async_script(script, *[method, args, options])
return result
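    # Usage sketch (the command name and arguments are illustrative; the
    # shape of the returned object depends on the executed RPC command):
    #   result = self.execute_api_from_ui('user_show', ['admin'], {})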
def click_on_link(self, text, parent=None):
"""
Click on link with given text and parent.
"""
if not parent:
parent = self.get_form()
link = self.find(text, By.LINK_TEXT, parent, strict=True)
link.click()
def facet_button_click(self, name):
"""
Click on facet button with given name
"""
facet = self.get_facet()
s = ".facet-controls button[name=%s]" % name
self._button_click(s, facet, name)
def dialog_button_click(self, name, dialog=None):
"""
Click on dialog button with given name
Chooses last dialog if none is supplied
"""
if not dialog:
dialog = self.get_dialog(strict=True)
s = ".rcue-dialog-buttons button[name='%s']" % name
self._button_click(s, dialog, name)
def action_button_click(self, name, parent):
"""
Click on .action-button
"""
if not parent:
parent = self.get_form()
s = "a[name='%s'].action-button" % name
self._button_click(s, parent, name)
def button_click(self, name, parent=None,
parents_css_sel=None):
"""
Click on .ui-button
"""
if not parent:
if parents_css_sel:
parent = self.find(parents_css_sel, By.CSS_SELECTOR,
strict=True)
else:
parent = self.get_form()
s = "[name='%s'].btn" % name
self._button_click(s, parent, name)
def _button_click(self, selector, parent, name=''):
btn = self.find(selector, By.CSS_SELECTOR, parent, strict=True)
ActionChains(self.driver).move_to_element(btn).perform()
disabled = btn.get_attribute("disabled")
assert btn.is_displayed(), 'Button is not displayed: %s' % name
assert not disabled, 'Invalid button state: disabled. Button: %s' % name
btn.click()
self.wait_for_request()
def profile_menu_action(self, name):
"""
Execute action from profile menu
"""
menu_toggle = self.find('[name=profile-menu] > a', By.CSS_SELECTOR)
menu_toggle.click()
s = "[name=profile-menu] a[href='#%s']" % name
btn = self.find(s, By.CSS_SELECTOR, strict=True)
btn.click()
# action is usually followed by opening a dialog, add wait to compensate
# possible dialog transition effect
self.wait(0.5)
def get_form(self):
"""
Get last dialog or visible facet
"""
form = self.get_dialog()
if not form:
form = self.get_facet()
return form
def select(self, selector, value, parent=None):
"""
Select option with given value in select element
"""
if not parent:
parent = self.get_form()
el = self.find(selector, By.CSS_SELECTOR, parent, strict=True)
Select(el).select_by_value(value)
def fill_text(self, selector, value, parent=None):
"""
Clear and enter text into input defined by selector.
Use for non-standard fields.
"""
if not parent:
parent = self.get_form()
tb = self.find(selector, By.CSS_SELECTOR, parent, strict=True)
try:
tb.clear()
tb.send_keys(value)
except InvalidElementStateException as e:
msg = "Invalid Element State, el: %s, value: %s, error: %s" % (selector, value, e)
assert False, msg
def fill_input(self, name, value, input_type="text", parent=None):
"""
Type into input element specified by name and type.
"""
s = "div[name='%s'] input[type='%s'][name='%s']" % (name, input_type, name)
self.fill_text(s, value, parent)
def fill_textarea(self, name, value, parent=None):
"""
Clear and fill textarea.
"""
s = "textarea[name='%s']" % (name)
self.fill_text(s, value, parent)
def fill_textbox(self, name, value, parent=None):
"""
Clear and fill textbox.
"""
self.fill_input(name, value, "text", parent)
def fill_password(self, name, value, parent=None):
"""
Clear and fill input[type=password]
"""
self.fill_input(name, value, "password", parent)
def add_multivalued(self, name, value, parent=None):
"""
Add new value to multivalued textbox
"""
if not parent:
parent = self.get_form()
s = "div[name='%s'].multivalued-widget" % name
w = self.find(s, By.CSS_SELECTOR, parent, strict=True)
add_btn = self.find("button[name=add]", By.CSS_SELECTOR, w, strict=True)
add_btn.click()
s = "div[name=value] input"
inputs = self.find(s, By.CSS_SELECTOR, w, many=True)
last = inputs[-1]
last.send_keys(value)
def del_multivalued(self, name, value, parent=None):
"""
Mark value in multivalued textbox as deleted.
"""
if not parent:
parent = self.get_form()
s = "div[name='%s'].multivalued-widget" % name
w = self.find(s, By.CSS_SELECTOR, parent, strict=True)
s = "div[name=value] input"
inputs = self.find(s, By.CSS_SELECTOR, w, many=True)
clicked = False
for i in inputs:
val = i.get_attribute('value')
n = i.get_attribute('name')
if val == value:
s = "input[name='%s'] ~ .input-group-btn button[name=remove]" % n
link = self.find(s, By.CSS_SELECTOR, w, strict=True)
link.click()
self.wait()
clicked = True
assert clicked, 'Value was not removed: %s' % value
def fill_multivalued(self, name, instructions, parent=None):
"""
Add or delete a value from multivalued field
"""
for instruction in instructions:
t = instruction[0]
value = instruction[1]
if t == 'add':
self.add_multivalued(name, value, parent)
else:
self.del_multivalued(name, value, parent)
def check_option(self, name, value=None, parent=None):
"""
        Find checkbox or radio whose name is NAME or matches ^NAME\d+$ and
        check it by clicking on its label.
"""
if not parent:
parent = self.get_form()
s = "//input[@type='checkbox' or 'radio'][contains(@name, '%s')]" % name
if value is not None:
s += "[@value='%s']" % value
opts = self.find(s, "xpath", parent, many=True)
label = None
# Select only the one which matches exactly the name
for o in opts:
n = o.get_attribute("name")
if n == name or re.match("^%s\d+$" % name, n):
s = "label[for='%s']" % o.get_attribute("id")
label = self.find(s, By.CSS_SELECTOR, parent, strict=True)
break
assert label is not None, "Option not found: %s" % name
label.click()
def select_combobox(self, name, value, parent=None, combobox_input=None):
"""
Select value in a combobox. Search if not found.
"""
if not parent:
parent = self.get_form()
s = "[name='%s'].combobox-widget" % name
cb = self.find(s, By.CSS_SELECTOR, parent, strict=True)
open_btn = self.find('a[name=open] i', By.CSS_SELECTOR, cb, strict=True)
open_btn.click()
self.wait()
self.wait_for_request()
list_cnt = self.find('.combobox-widget-list', By.CSS_SELECTOR, cb, strict=True)
search_btn = self.find('a[name=search] i', By.CSS_SELECTOR, cb, strict=True)
opt_s = "select[name=list] option[value='%s']" % value
option = self.find(opt_s, By.CSS_SELECTOR, cb)
if combobox_input:
if not option:
self.fill_textbox(combobox_input, value, cb)
else:
if not option:
# try to search
self.fill_textbox('filter', value, cb)
search_btn.click()
self.wait_for_request()
option = self.find(opt_s, By.CSS_SELECTOR, cb, strict=True)
option.click()
# Chrome does not close search area on click
if list_cnt.is_displayed():
self.driver.switch_to_active_element().send_keys(Keys.RETURN)
self.wait()
def get_text(self, selector, parent=None):
if not parent:
parent = self.get_form()
el = self.find(selector, By.CSS_SELECTOR, parent, strict=True)
return el.text
def get_value(self, selector, parent=None):
if not parent:
parent = self.get_form()
el = self.find(selector, By.CSS_SELECTOR, parent, strict=True)
value = el.get_attribute('value')
return value
def get_field_text(self, name, parent=None, element='p'):
s = ".controls %s[name='%s']" % (element, name)
return self.get_text(s, parent)
def get_field_value(self, name, parent=None, element='input'):
s = ".controls %s[name='%s']" % (element, name)
return self.get_value(s, parent)
def get_multivalued_value(self, name, parent=None):
s = "div[name='%s'] div[name='value'] input[name^='%s']" % (name, name)
els = self.find(s, By.CSS_SELECTOR, parent, many=True)
values = []
for el in els:
values.append(el.get_attribute('value'))
return values
def get_field_checked(self, name, parent=None):
if not parent:
parent = self.get_form()
s = "div[name='%s'] input[name^='%s']" % (name, name)
els = self.find(s, By.CSS_SELECTOR, parent, strict=True, many=True)
values = []
for el in els:
if el.is_selected():
values.append(el.get_attribute('value'))
return values
def get_field_selected(self, name, parent=None):
if not parent:
parent = self.get_form()
s = "div[name='%s'] select[name='%s']" % (name, name)
el = self.find(s, By.CSS_SELECTOR, parent, strict=True)
select = Select(el)
selected = select.all_selected_options
values = []
for opt in selected:
values.append(opt.get_attribute('value'))
return values
def get_undo_buttons(self, field, parent):
"""
Get field undo button
"""
if not parent:
parent = self.get_form()
s = ".controls div[name='%s'] .btn.undo" % (field)
undos = self.find(s, By.CSS_SELECTOR, parent, strict=True, many=True)
return undos
def get_rows(self, parent=None, name=None):
"""
Return all rows of search table.
"""
if not parent:
parent = self.get_form()
# select table rows
s = self.get_table_selector(name)
s += ' tbody tr'
rows = self.find(s, By.CSS_SELECTOR, parent, many=True)
return rows
def get_row(self, pkey, parent=None, name=None):
"""
Get row element of search table with given pkey. None if not found.
"""
rows = self.get_rows(parent, name)
s = "input[value='%s']" % pkey
for row in rows:
has = self.find(s, By.CSS_SELECTOR, row)
if has:
return row
return None
def navigate_to_row_record(self, row, pkey_column=None):
"""
Navigate to record by clicking on a link.
"""
s = 'a'
if pkey_column:
s = "div[name='%s'] a" % pkey_column
link = self.find(s, By.CSS_SELECTOR, row, strict=True)
link.click()
self.wait_for_request(0.4)
self.wait_for_request()
def get_table_selector(self, name=None):
"""
Construct table selector
"""
s = "table"
if name:
s += "[name='%s']" % name
s += '.table'
return s
def select_record(self, pkey, parent=None, table_name=None):
"""
Select record with given pkey in search table.
"""
if not parent:
parent = self.get_form()
s = self.get_table_selector(table_name)
input_s = s + " tbody td input[value='%s']" % pkey
checkbox = self.find(input_s, By.CSS_SELECTOR, parent, strict=True)
try:
ActionChains(self.driver).move_to_element(checkbox).click().perform()
except WebDriverException as e:
assert False, 'Can\'t click on checkbox label: %s \n%s' % (s, e)
self.wait()
assert checkbox.is_selected(), 'Record was not checked: %s' % input_s
self.wait()
def get_record_value(self, pkey, column, parent=None, table_name=None):
"""
Get table column's text value
"""
row = self.get_row(pkey, parent, table_name)
s = "div[name=%s]" % column
val = None
if row:
el = self.find(s, By.CSS_SELECTOR, row)
val = el.text
return val
def has_record(self, pkey, parent=None, table_name=None):
"""
Check if table contains specific record.
"""
if not parent:
parent = self.get_form()
s = self.get_table_selector(table_name)
s += " tbody td input[value='%s']" % pkey
checkbox = self.find(s, By.CSS_SELECTOR, parent)
return checkbox is not None
def navigate_to_record(self, pkey, parent=None, table_name=None, entity=None, facet='search'):
"""
Clicks on record with given pkey in search table and thus cause
navigation to the record.
"""
if entity:
self.navigate_to_entity(entity, facet)
if not parent:
parent = self.get_facet()
s = self.get_table_selector(table_name)
s += " tbody"
table = self.find(s, By.CSS_SELECTOR, parent, strict=True)
link = self.find(pkey, By.LINK_TEXT, table, strict=True)
link.click()
self.wait_for_request()
def delete_record(
self, pkeys, fields=None, parent=None, table_name=None,
facet_btn='remove'):
"""
Delete records with given pkeys in currently opened search table.
"""
if type(pkeys) is not list:
pkeys = [pkeys]
# select
selected = False
for pkey in pkeys:
delete = self.has_record(pkey, parent, table_name)
if delete:
self.select_record(pkey, parent, table_name)
selected = True
# exec and confirm
if selected:
if table_name and parent:
s = self.get_table_selector(table_name)
table = self.find(s, By.CSS_SELECTOR, parent, strict=True)
self.button_click(facet_btn, table)
else:
self.facet_button_click(facet_btn)
if fields:
self.fill_fields(fields)
self.dialog_button_click('ok')
self.wait_for_request(n=2)
self.wait()
def delete(self, entity, data_list, facet='search', navigate=True):
"""
Delete entity records:
"""
if navigate:
self.navigate_to_entity(entity, facet)
for data in data_list:
pkey = data.get('pkey')
fields = data.get('del')
self.delete_record(pkey, fields)
def fill_fields(
self, fields, parent=None, undo=False, combobox_input=None):
"""
        Fill dialog or facet inputs with given data.
Expected format:
[
            ('widget_type', 'key', 'value'),
            ('widget_type', 'key2', 'value2'),
]
"""
if not parent:
parent = self.get_form()
for field in fields:
widget_type = field[0]
key = field[1]
val = field[2]
if undo and not hasattr(key, '__call__'):
self.assert_undo_button(key, False, parent)
if widget_type == 'textbox':
self.fill_textbox(key, val, parent)
elif widget_type == 'textarea':
self.fill_textarea(key, val, parent)
elif widget_type == 'password':
self.fill_password(key, val, parent)
elif widget_type == 'radio':
self.check_option(key, val, parent)
elif widget_type == 'checkbox':
self.check_option(key, val, parent=parent)
elif widget_type == 'selectbox':
self.select('select[name=%s]' % key, val, parent)
elif widget_type == 'combobox':
self.select_combobox(
key, val, parent, combobox_input=combobox_input)
elif widget_type == 'add_table_record':
self.add_table_record(key, val, parent)
elif widget_type == 'add_table_association':
self.add_table_associations(key, val, parent)
elif widget_type == 'multivalued':
self.fill_multivalued(key, val, parent)
elif widget_type == 'table':
self.select_record(val, parent, key)
# this meta field specifies a function, to extend functionality of
# field checking
elif widget_type == 'callback':
if hasattr(key, '__call__'):
key(val)
self.wait()
if undo and not hasattr(key, '__call__'):
self.assert_undo_button(key, True, parent)
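# Illustrative sketch (not part of the original helper): a fields list in
# the format fill_fields() expects. All field names and values here are
# hypothetical.
#
# fields = [
# ('textbox', 'uid', 'tuser1'),
# ('password', 'userpassword', 'Secret123'),
# ('checkbox', 'random', True),
# ('combobox', 'manager', 'admin'),
# ]
# self.fill_fields(fields)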
def validate_fields(self, fields, parent=None):
"""
Validate that fields on a page or dialog have desired values.
"""
if not fields:
return
if not parent:
parent = self.get_form()
for field in fields:
ftype = field[0]
key = field[1]
expected = field[2]
actual = None
if ftype == 'label':
actual = self.get_field_text(key, parent)
elif ftype in ('textbox', 'password', 'combobox'):
actual = self.get_field_value(key, parent, 'input')
elif ftype == 'textarea':
actual = self.get_field_value(key, parent, 'textarea')
elif ftype == 'radio':
actual = self.get_field_checked(key, parent)
elif ftype == 'checkbox':
actual = self.get_field_checked(key, parent)
elif ftype == 'multivalued':
actual = self.get_multivalued_value(key, parent)
elif ftype == 'table_record':
if self.has_record(expected, parent, key):
actual = expected
valid = False
if isinstance(expected, list):
valid = isinstance(actual, list) and sorted(expected) == sorted(actual)
else:
# compare other values, usually strings:
valid = actual == expected
assert valid, "Values don't match. Expected: '%s', Got: '%s'" % (expected, actual)
def find_record(self, entity, data, facet='search', dummy='XXXXXXX'):
"""
Test search functionality of search facet.
1. search for non-existent value and test if result set is empty.
2. search for specific pkey and test if it's present on the page
3. reset search page by not using search criteria
"""
self.assert_facet(entity, facet)
facet = self.get_facet()
search_field_s = '.search-filter input[name=filter]'
key = data.get('pkey')
self.fill_text(search_field_s, dummy, facet)
self.action_button_click('find', facet)
self.wait_for_request(n=2)
self.assert_record(key, negative=True)
self.fill_text(search_field_s, key, facet)
self.action_button_click('find', facet)
self.wait_for_request(n=2)
self.assert_record(key)
self.fill_text(search_field_s, '', facet)
self.action_button_click('find', facet)
self.wait_for_request(n=2)
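# Illustrative sketch (not part of the original helper): exercising the
# search facet of a hypothetical 'user' entity.
#
# self.find_record('user', {'pkey': 'tuser1'})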
def add_record(self, entity, data, facet='search', facet_btn='add',
dialog_btn='add', delete=False, pre_delete=True,
dialog_name='add', navigate=True, combobox_input=None):
"""
Add records.
Expected data format:
{
'pkey': 'key',
add: [
('widget_type', 'key', 'value'),
('widget_type', 'key2', 'value2'),
],
}
"""
pkey = data['pkey']
if navigate:
self.navigate_to_entity(entity, facet)
# check facet
self.assert_facet(entity, facet)
# delete the record if it exists, e.g. left over from a previous test failure
if pre_delete:
self.delete_record(pkey, data.get('del'))
# current row count
self.wait_for_request(0.5)
count = len(self.get_rows())
# open add dialog
self.assert_no_dialog()
self.facet_button_click(facet_btn)
self.assert_dialog(dialog_name)
# fill dialog
self.fill_fields(data['add'], combobox_input=combobox_input)
# confirm dialog
self.dialog_button_click(dialog_btn)
self.wait_for_request()
self.wait_for_request()
# check expected error/warning/info
expected = ['error_4304_info']
dialog_info = self.get_dialog_info()
if dialog_info and dialog_info['name'] in expected:
self.dialog_button_click('ok')
self.wait_for_request()
# check for error
self.assert_no_error_dialog()
self.wait_for_request()
self.wait_for_request(0.4)
# check if table has more rows
new_count = len(self.get_rows())
# adjust because of paging
expected = count + 1
if count == 20:
expected = 20
self.assert_row_count(expected, new_count)
# delete record
if delete:
self.delete_record(pkey)
new_count = len(self.get_rows())
self.assert_row_count(count, new_count)
def mod_record(self, entity, data, facet='details', facet_btn='save'):
"""
Mod record
Assumes that it is already on details page.
"""
self.assert_facet(entity, facet)
# TODO assert pkey
self.assert_facet_button_enabled(facet_btn, enabled=False)
self.fill_fields(data['mod'], undo=True)
self.assert_facet_button_enabled(facet_btn)
self.facet_button_click(facet_btn)
self.wait_for_request()
self.wait_for_request()
self.assert_facet_button_enabled(facet_btn, enabled=False)
def basic_crud(self, entity, data,
parent_entity=None,
details_facet='details',
search_facet='search',
default_facet='details',
add_facet_btn='add',
add_dialog_btn='add',
add_dialog_name='add',
update_btn='save',
breadcrumb=None,
navigate=True,
delete=True):
"""
Basic CRUD operation sequence.
Expected data format:
{
'pkey': 'key',
'add': [
('widget_type', 'key', 'value'),
('widget_type', 'key2', 'value2'),
],
'mod': [
('widget_type', 'key', 'value'),
('widget_type', 'key2', 'value2'),
],
}
"""
# important for nested entities, i.e. automount maps
if not parent_entity:
parent_entity = entity
pkey = data['pkey']
# 1. Open Search Facet
if navigate:
self.navigate_to_entity(parent_entity)
self.assert_facet(parent_entity, search_facet)
self.wait_for_request()
# 2. Add record
self.add_record(parent_entity, data, facet=search_facet, navigate=False,
facet_btn=add_facet_btn, dialog_name=add_dialog_name,
dialog_btn=add_dialog_btn
)
# Find
self.find_record(parent_entity, data, search_facet)
# 3. Navigate to details facet
self.navigate_to_record(pkey)
self.assert_facet(entity, default_facet)
self.wait_for_request(0.5)
if default_facet != details_facet:
self.switch_to_facet(details_facet)
self.assert_facet(entity, details_facet)
self.validate_fields(data.get('add_v'))
# 4. Mod values
if data.get('mod'):
self.mod_record(entity, data, details_facet, update_btn)
self.validate_fields(data.get('mod_v'))
if not breadcrumb:
self.navigate_to_entity(entity, search_facet)
else:
self.navigate_by_breadcrumb(breadcrumb)
# 5. Delete record
if delete:
self.delete_record(pkey, data.get('del'))
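# Illustrative sketch (not part of the original helper): a data dict in the
# format basic_crud() expects. The entity, keys and values are hypothetical.
#
# data = {
# 'pkey': 'tuser1',
# 'add': [
# ('textbox', 'uid', 'tuser1'),
# ('textbox', 'givenname', 'Test'),
# ('textbox', 'sn', 'User'),
# ],
# 'mod': [
# ('textbox', 'givenname', 'Changed'),
# ],
# }
# self.basic_crud('user', data)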
def add_table_record(self, name, data, parent=None):
"""
Add record to dnsrecord table, association table and similar
"""
if not parent:
parent = self.get_form()
s = self.get_table_selector(name)
table = self.find(s, By.CSS_SELECTOR, parent, strict=True)
s = ".btn[name=%s]" % 'add'
btn = self.find(s, By.CSS_SELECTOR, table, strict=True)
btn.click()
self.wait()
self.fill_fields(data['fields'])
self.dialog_button_click('add')
self.wait_for_request()
def prepare_associations(
self, pkeys, facet=None, facet_btn='add', member_pkeys=None):
"""
Helper function for add_associations and delete_associations
"""
if facet:
self.switch_to_facet(facet)
self.facet_button_click(facet_btn)
self.wait()
self.wait_for_request()
for key in pkeys:
self.select_record(key, table_name='available')
self.button_click('add')
self.dialog_button_click('add')
self.wait_for_request()
if member_pkeys:
check_pkeys = member_pkeys
else:
check_pkeys = pkeys
return check_pkeys
def add_associations(
self, pkeys, facet=None, delete=False, facet_btn='add',
member_pkeys=None):
"""
Add associations
"""
check_pkeys = self.prepare_associations(
pkeys, facet, facet_btn, member_pkeys)
for key in check_pkeys:
self.assert_record(key)
if delete:
self.delete_record(key)
self.assert_record(key, negative=True)
def delete_associations(
self, pkeys, facet=None, facet_btn='remove', member_pkeys=None):
"""
Remove associations
"""
check_pkeys = self.prepare_associations(
pkeys, facet, facet_btn, member_pkeys)
for key in check_pkeys:
self.assert_record(key, negative=True)
def add_table_associations(self, table_name, pkeys, parent=False, delete=False):
"""
Add value to table (association|rule|...)
"""
if not parent:
parent = self.get_form()
s = self.get_table_selector(table_name)
table = self.find(s, By.CSS_SELECTOR, parent, strict=True)
s = "button[name='%s']" % 'add'
btn = self.find(s, By.CSS_SELECTOR, table, strict=True)
btn.click()
self.wait_for_request(0.4)
for key in pkeys:
self.select_record(key, table_name='available')
self.button_click('add')
self.wait()
self.dialog_button_click('add')
self.wait_for_request(n=2)
for key in pkeys:
self.assert_record(key, parent, table_name)
if delete:
self.delete_record(pkeys, None, parent, table_name)
for key in pkeys:
self.assert_record(key, parent, table_name, negative=True)
def action_list_action(self, name, confirm=True, confirm_btn="ok",
parents_css_sel=None):
"""
Execute action list action
"""
context = None
if not parents_css_sel:
context = self.find(".active-facet .facet-actions",
By.CSS_SELECTOR, strict=True)
else:
context = self.find(parents_css_sel, By.CSS_SELECTOR,
strict=True)
expand = self.find(".dropdown-toggle", By.CSS_SELECTOR, context,
strict=True)
expand.click()
action_link = self.find("li[data-name=%s] a" % name, By.CSS_SELECTOR,
context, strict=True)
action_link.click()
if confirm:
self.wait(0.5) # wait for dialog
self.dialog_button_click(confirm_btn)
self.wait()
def action_panel_action(self, panel_name, action):
"""
Execute action from action panel with given name.
"""
s = "div[data-name='%s'].action-panel" % panel_name
s += " a[data-name='%s']" % action
link = self.find(s, By.CSS_SELECTOR, strict=True)
link.click()
self.wait()
def enable_action(self):
"""
Execute and test 'enable' action panel action.
"""
title = self.find('.active-facet div.facet-title', By.CSS_SELECTOR, strict=True)
self.action_list_action('enable')
self.wait_for_request(n=2)
self.assert_no_error_dialog()
self.assert_class(title, 'disabled', negative=True)
def disable_action(self):
"""
Execute and test 'disable' action panel action.
"""
title = self.find('.active-facet div.facet-title', By.CSS_SELECTOR, strict=True)
self.action_list_action('disable')
self.wait_for_request(n=2)
self.assert_no_error_dialog()
self.assert_class(title, 'disabled')
def delete_action(self, entity, pkey, action='delete', facet='search'):
"""
Execute and test 'delete' action panel action.
"""
self.action_list_action(action)
self.wait_for_request(n=4)
self.assert_no_error_dialog()
self.assert_facet(entity, facet)
self.assert_record(pkey, negative=True)
def mod_rule_tables(self, tables, categories, no_categories):
"""
Test functionality of rule table widgets in a facet
"""
def get_t_vals(t):
table = t[0]
k = t[1]
e = []
if len(t) > 2:
e = t[2]
return table, k, e
t_list = [t[0] for t in tables if t[0] not in no_categories]
# add values
for t in tables:
table, keys, _exts = get_t_vals(t)
# add one by one to test for #3711
for key in keys:
self.add_table_associations(table, [key])
#disable tables
for cat in categories:
self.check_option(cat, 'all')
# update
self.assert_rule_tables_enabled(t_list, False)
self.facet_button_click('save')
self.wait_for_request(n=3, d=0.3)
self.assert_rule_tables_enabled(t_list, False)
p = self.get_form()
# now tables in categories should be empty, check it
for t in tables:
table, keys, _exts = get_t_vals(t)
if table in no_categories:
# clear the rest
self.delete_record(keys, None, p, table)
continue
for key in keys:
self.assert_record(key, p, table, negative=True)
# enable tables
for cat in categories:
self.check_option(cat, '')
self.assert_rule_tables_enabled(t_list, True)
self.facet_button_click('save')
self.wait_for_request(n=3, d=0.3)
self.assert_rule_tables_enabled(t_list, True)
for t in tables:
table, keys, _exts = get_t_vals(t)
# add multiple at once and test table delete button
self.add_table_associations(table, keys, delete=True)
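# Illustrative sketch (not part of the original helper): the argument shape
# mod_rule_tables() works with, as unpacked by get_t_vals() above. All
# table, key and category names are hypothetical.
#
# tables = [
# ('memberuser_user', ['tuser1']),
# ('memberhost_host', ['host1.example.com']),
# ]
# self.mod_rule_tables(tables, ['usercategory', 'hostcategory'], [])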
def has_class(self, el, cls):
"""
Check if el has CSS class
"""
return cls in el.get_attribute("class").split()
def skip(self, reason):
"""
Skip tests
"""
raise nose.SkipTest(reason)
def assert_text(self, selector, value, parent=None):
"""
Assert read-only text value in details page or in a form
"""
text = self.get_text(selector, parent)
text = text.strip()
value = value.strip()
assert text == value, "Invalid value: '%s' Expected: %s" % (text, value)
def assert_text_field(self, name, value, parent=None, element='label'):
"""
Assert read-only text value in details page or in a form
"""
s = "div[name='%s'] %s[name='%s']" % (name, element, name)
self.assert_text(s, value, parent)
def assert_empty_value(self, selector, parent=None, negative=False):
"""
Assert empty value of some field in details page or in a form
"""
value = self.get_value(selector, parent)
if negative:
assert not value == ''
else:
assert value == ''
def assert_no_dialog(self):
"""
Assert that no dialog is opened
"""
dialogs = self.get_dialogs()
assert not dialogs, 'Invalid state: dialog opened'
def assert_dialog(self, name=None):
"""
Assert that one dialog is opened or a dialog with given name
"""
dialogs = self.get_dialogs(name)
assert len(dialogs) == 1, 'Expected exactly one dialog to be open'
def assert_no_error_dialog(self):
"""
Assert that no error dialog is opened
"""
dialog = self.get_last_error_dialog()
ok = dialog is None
if not ok:
msg = self.find('p', By.CSS_SELECTOR, dialog).text
assert ok, 'Unexpected error: %s' % msg
def assert_row_count(self, expected, current):
"""
Assert that row counts match
"""
assert expected == current, "Rows don't match. Expected: %d, Got: %d" % (expected, current)
def assert_button_enabled(self, name, context_selector=None, enabled=True):
"""
Assert that button is enabled or disabled (expects that element will be
<button>)
"""
s = ""
if context_selector:
s = context_selector
s += "button[name=%s]" % name
facet = self.get_facet()
btn = self.find(s, By.CSS_SELECTOR, facet, strict=True)
valid = enabled == btn.is_enabled()
assert btn.is_displayed(), 'Button is not displayed'
assert valid, 'Button (%s) has incorrect enabled state (enabled==%s).' % (s, enabled)
def assert_facet_button_enabled(self, name, enabled=True):
"""
Assert that facet button is enabled or disabled
"""
self.assert_button_enabled(name, ".facet-controls ", enabled)
def assert_table_button_enabled(self, name, table_name, enabled=True):
"""
Assert that button in table is enabled/disabled
"""
s = "table[name='%s'] " % table_name
self.assert_button_enabled(name, s, enabled)
def assert_facet(self, entity, facet=None):
"""
Assert that current facet is correct
"""
info = self.get_facet_info()
if facet is not None:
assert info["name"] == facet, "Invalid facet. Expected: %s, Got: %s " % (facet, info["name"])
assert info["entity"] == entity, "Invalid entity. Expected: %s, Got: %s " % (entity, info["entity"])
def assert_undo_button(self, field, visible=True, parent=None):
"""
Assert that undo button is or is not visible
"""
undos = self.get_undo_buttons(field, parent)
state = False
for undo in undos:
if undo.is_displayed():
state = True
break
if visible:
assert state, "Undo button not visible. Field: %s" % field
else:
assert not state, "Undo button visible. Field: %s" % field
def assert_visible(self, selector, parent=None, negative=False):
"""
Assert that element defined by selector is visible
"""
if not parent:
parent = self.get_form()
el = self.find(selector, By.CSS_SELECTOR, parent, strict=True)
visible = el.is_displayed()
if negative:
assert not visible, "Element visible: %s" % selector
else:
assert visible, "Element not visible: %s" % selector
def assert_disabled(self, selector, parent=None, negative=False):
"""
Assert that element defined by selector is disabled
"""
if not parent:
parent = self.get_form()
self.find(selector, By.CSS_SELECTOR, parent, strict=True)
dis = self.find(selector+"[disabled]", By.CSS_SELECTOR, parent)
if negative:
assert dis is None, "Element is disabled: %s" % selector
else:
assert dis, "Element is not disabled: %s" % selector
def assert_record(self, pkey, parent=None, table_name=None, negative=False):
"""
Assert that record is in current search table
"""
has = self.has_record(pkey, parent, table_name)
has |= self.has_record(pkey.lower(), parent, table_name)
if negative:
assert not has, "Record exists when it shouldn't: %s" % pkey
else:
assert has, 'Record does not exist: %s' % pkey
def assert_indirect_record(self, pkey, entity, facet, negative=False, switch=True):
"""
Switch to indirect facet and assert record.
Key matching is case-insensitive (assert_record also checks the lowercased key).
"""
if switch:
self.switch_to_facet(facet)
radio_name = "%s-%s-type-radio" % (entity, facet.replace('_', '-'))
self.check_option(radio_name, 'indirect')
self.wait_for_request(n=2)
key = pkey
self.assert_record(key, negative=negative)
def assert_record_value(self, expected, pkey, column, parent=None, table_name=None):
"""
Assert that column's value of record defined by pkey equals expected value.
"""
val = self.get_record_value(pkey, column, parent, table_name)
assert expected == val, "Invalid value: '%s'. Expected: '%s'." % (val, expected)
def assert_class(self, element, cls, negative=False):
"""
Assert that element has certain class
"""
valid = self.has_class(element, cls)
if negative:
assert not valid, "Element contains unwanted class: %s" % cls
else:
assert valid, "Element doesn't contain required class: %s" % cls
def assert_rule_tables_enabled(self, tables, enabled):
"""
Assert that rule table is editable - values can be added and removed.
"""
for table in tables:
self.assert_table_button_enabled('add', table, enabled)
def assert_menu_item(self, path, present=True):
"""
Assert that menu link is not rendered or visible
"""
s = ".navigation a[href='#%s']" % path
link = self.find(s, By.CSS_SELECTOR)
is_present = link is not None and link.is_displayed()
assert present == is_present, ('Invalid state of navigation item: %s. '
'Presence expected: %s') % (path, str(present))
def assert_action_panel_action(self, panel_name, action, visible=True, enabled=True):
"""
Assert that action panel action is visible/hidden, and enabled/disabled
Enabled is checked only if action is visible.
"""
s = "div[data-name='%s'].action-panel" % panel_name
s += " a[data-name='%s']" % action
link = self.find(s, By.CSS_SELECTOR)
is_visible = link is not None and link.is_displayed()
is_enabled = False
if is_visible:
is_enabled = not self.has_class(link, 'disabled')
assert is_visible == visible, ('Invalid visibility of action button: %s. '
'Expected: %s') % (action, str(visible))
if is_visible:
assert is_enabled == enabled, ('Invalid enabled state of action button %s. '
'Expected: %s') % (action, str(enabled))
def assert_action_list_action(self, action, visible=True, enabled=True,
parent=None, parents_css_sel=None,
facet_actions=True):
"""
Assert that action dropdown action is visible/hidden, and enabled/disabled
Enabled is checked only if action is visible.
"""
li_s = " li[data-name='%s']" % action
if not parent:
parent = self.get_form()
if facet_actions:
li_s = ".facet-actions" + li_s
else:
li_s = parents_css_sel + li_s
li = self.find(li_s, By.CSS_SELECTOR, parent)
link = self.find("a", By.CSS_SELECTOR, li)
is_visible = li is not None and link is not None
is_enabled = False
assert is_visible == visible, ('Invalid visibility of action item: %s. '
'Expected: %s') % (action, str(visible))
if is_visible:
is_enabled = not self.has_class(li, 'disabled')
assert is_enabled == enabled, ('Invalid enabled state of action item %s. '
'Expected: %s') % (action, str(enabled))
| gpl-3.0 |
fzimmermann89/pyload | module/common/APIExerciser.py | 41 | 4349 | # -*- coding: utf-8 -*-
import string
from threading import Thread
from random import choice, random, sample, randint
from time import time, sleep
from math import floor
import gc
from traceback import print_exc, format_exc
from module.remote.thriftbackend.ThriftClient import ThriftClient, Destination
def createURLs():
""" create some urls, some may fail """
urls = []
for x in range(0, randint(20, 100)):
name = "DEBUG_API"
if randint(0, 5) == 5:
name = "" #this link will fail
urls.append(name + "".join(sample(string.ascii_letters, randint(10, 20))))
return urls
AVOID = (0,3,8)
idPool = 0
sumCalled = 0
def startApiExerciser(core, n):
for i in range(n):
APIExerciser(core).start()
class APIExerciser(Thread):
def __init__(self, core, thrift=False, user=None, pw=None):
global idPool
Thread.__init__(self)
self.setDaemon(True)
self.core = core
self.count = 0 #number of methods
self.time = time()
if thrift:
self.api = ThriftClient(user=user, password=pw)
else:
self.api = core.api
self.id = idPool
idPool += 1
#self.start()
def run(self):
self.core.log.info("API Excerciser started %d" % self.id)
out = open("error.log", "ab")
#core errors are not logged of course
out.write("\n" + "Starting\n")
out.flush()
while True:
try:
self.testAPI()
except Exception:
self.core.log.error("Excerciser %d throw an execption" % self.id)
print_exc()
out.write(format_exc() + 2 * "\n")
out.flush()
if not self.count % 100:
self.core.log.info("Exerciser %d tested %d api calls" % (self.id, self.count))
if not self.count % 1000:
out.flush()
if not sumCalled % 1000: #not thread safe
self.core.log.info("Exercisers tested %d api calls" % sumCalled)
persec = sumCalled / (time() - self.time)
self.core.log.info("Approx. %.2f calls per second." % persec)
self.core.log.info("Approx. %.2f ms per call." % (1000 / persec))
self.core.log.info("Collected garbage: %d" % gc.collect())
#sleep(random() / 500)
def testAPI(self):
global sumCalled
m = ["statusDownloads", "statusServer", "addPackage", "getPackageData", "getFileData", "deleteFiles",
"deletePackages", "getQueue", "getCollector", "getQueueData", "getCollectorData", "isCaptchaWaiting",
"getCaptchaTask", "stopAllDownloads", "getAllInfo", "getServices" , "getAccounts", "getAllUserData"]
method = choice(m)
#print "Testing:", method
if hasattr(self, method):
res = getattr(self, method)()
else:
res = getattr(self.api, method)()
self.count += 1
sumCalled += 1
#print res
def addPackage(self):
name = "".join(sample(string.ascii_letters, 10))
urls = createURLs()
self.api.addPackage(name, urls, choice([Destination.Queue, Destination.Collector]))
def deleteFiles(self):
info = self.api.getQueueData()
if not info: return
pack = choice(info)
fids = pack.links
if len(fids):
fids = [f.fid for f in sample(fids, randint(1, max(len(fids) / 2, 1)))]
self.api.deleteFiles(fids)
def deletePackages(self):
info = choice([self.api.getQueue(), self.api.getCollector()])
if not info: return
pids = [p.pid for p in info]
if len(pids):
pids = sample(pids, randint(1, max(floor(len(pids) / 2.5), 1)))
self.api.deletePackages(pids)
def getFileData(self):
info = self.api.getQueueData()
if info:
p = choice(info)
if p.links:
self.api.getFileData(choice(p.links).fid)
def getPackageData(self):
info = self.api.getQueue()
if info:
self.api.getPackageData(choice(info).pid)
def getAccounts(self):
self.api.getAccounts(False)
def getCaptchaTask(self):
self.api.getCaptchaTask(False) | gpl-3.0 |
TheTimmy/spack | var/spack/repos/builtin/packages/shortstack/package.py | 3 | 1943 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Shortstack(Package):
"""ShortStack is a tool developed to process and analyze smallRNA-seq data
with respect to a reference genome, and output a comprehensive and
informative annotation of all discovered small RNA genes."""
homepage = "http://sites.psu.edu/axtell/software/shortstack/"
url = "https://github.com/MikeAxtell/ShortStack/archive/v3.8.3.tar.gz"
version('3.8.3', '3f21f494f799039f3fa88ea343f2d20d')
depends_on('perl', type=('build', 'run'))
depends_on('samtools')
depends_on('viennarna')
depends_on('bowtie')
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('ShortStack', prefix.bin)
| lgpl-2.1 |
2014c2g4/2015cda_g7 | static/Brython3.1.0-20150301-090019/Lib/site-packages/pygame/pkgdata.py | 603 | 2146 | """pkgdata is a simple, extensible way for a package to acquire data file
resources.
The getResource function is equivalent to the standard idioms, such as
the following minimal implementation::
import sys, os
def getResource(identifier, pkgname=__name__):
pkgpath = os.path.dirname(sys.modules[pkgname].__file__)
path = os.path.join(pkgpath, identifier)
return file(os.path.normpath(path), mode='rb')
When a __loader__ is present on the module given by __name__, it will defer
getResource to its get_data implementation and return it as a file-like
object (such as StringIO).
"""
__all__ = ['getResource']
import sys
import os
#from cStringIO import StringIO
from io import BytesIO # loader.get_data() returns bytes, so BytesIO (not StringIO) is needed
try:
# Try to use setuptools if available.
from pkg_resources import resource_stream
_have_resource_stream = True
except ImportError:
_have_resource_stream = False
def getResource(identifier, pkgname=__name__):
"""Acquire a readable object for a given package name and identifier.
An IOError will be raised if the resource can not be found.
For example::
mydata = getResource('mypkgdata.jpg').read()
Note that the package name must be fully qualified, if given, such
that it would be found in sys.modules.
In some cases, getResource will return a real file object. In that
case, it may be useful to use its name attribute to get the path
rather than use it as a file-like object. For example, you may
be handing data off to a C API.
"""
# Prefer setuptools
if _have_resource_stream:
return resource_stream(pkgname, identifier)
mod = sys.modules[pkgname]
fn = getattr(mod, '__file__', None)
if fn is None:
raise IOError("%r has no __file__!" % pkgname)
path = os.path.join(os.path.dirname(fn), identifier)
loader = getattr(mod, '__loader__', None)
if loader is not None:
try:
data = loader.get_data(path)
except IOError:
pass
else:
return BytesIO(data)
#return file(os.path.normpath(path), 'rb')
return open(os.path.normpath(path), 'rb')
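# Illustrative usage sketch (not part of the original module): reading
# package data with a fully qualified package name. The package and file
# names are hypothetical.
#
# data = getResource('data/sprite.png', pkgname='mygame.assets').read()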
| gpl-3.0 |
jezdez/django-hosts | tests/test_defaults.py | 3 | 3037 | from django.core.exceptions import ImproperlyConfigured
from django_hosts.defaults import host, patterns
from django_hosts.resolvers import get_host_patterns
from .base import HostsTestCase
class PatternsTests(HostsTestCase):
def test_pattern(self):
host_patterns = patterns('',
host(r'api', 'api.urls', name='api'),
)
self.assertEqual(len(host_patterns), 1)
self.assertTrue(isinstance(host_patterns[0], host))
self.assertEqual(repr(host_patterns[0]),
"<host api: regex='api' urlconf='api.urls' "
"scheme='//' port=''>")
def test_pattern_as_tuple(self):
host_patterns = patterns('',
(r'api', 'api.urls', 'api'),
)
self.assertEqual(len(host_patterns), 1)
self.assertTrue(isinstance(host_patterns[0], host))
def test_pattern_with_duplicate(self):
api_host = host(r'api', 'api.urls', name='api')
self.assertRaises(ImproperlyConfigured,
patterns, '', api_host, api_host)
def test_pattern_with_prefix(self):
host_patterns = patterns('mysite',
host(r'api', 'api.urls', name='api'),
)
self.assertEqual(len(host_patterns), 1)
self.assertTrue(isinstance(host_patterns[0], host))
self.assertEqual(host_patterns[0].urlconf, 'mysite.api.urls')
class HostTests(HostsTestCase):
def test_host(self):
api_host = host(r'api', 'api.urls', name='api')
self.assertTrue(isinstance(api_host, host))
def test_host_prefix(self):
api_host = host(r'api', 'api.urls', name='api', prefix='spam.eggs')
self.assertEqual(api_host.urlconf, 'spam.eggs.api.urls')
def test_host_string_callback(self):
api_host = host(r'api', 'api.urls', name='api',
callback='django_hosts.resolvers.get_host_patterns')
self.assertEqual(api_host.callback, get_host_patterns)
def test_host_callable_callback(self):
api_host = host(r'api', 'api.urls', name='api',
callback=get_host_patterns)
self.assertEqual(api_host.callback, get_host_patterns)
def test_host_nonexistent_callback(self):
api_host = host(r'api', 'api.urls', name='api',
callback='whatever.non_existent')
self.assertRaisesMessage(ImproperlyConfigured,
"Could not import 'whatever'. Error was: No module named",
lambda: api_host.callback)
api_host = host(r'api', 'api.urls', name='api',
callback='django_hosts.non_existent')
self.assertRaisesMessage(ImproperlyConfigured,
"Could not import 'django_hosts.non_existent'. "
"Callable does not exist in module",
lambda: api_host.callback)
api_host = host(r'api', 'api.urls', name='api',
callback='tests.broken_module.yeah_yeah')
self.assertRaises(ImproperlyConfigured, lambda: api_host.callback)
| bsd-3-clause |
ojake/django | django/core/management/base.py | 83 | 23884 | # -*- coding: utf-8 -*-
"""
Base classes for writing management commands (named commands which can
be executed through ``django-admin`` or ``manage.py``).
"""
from __future__ import unicode_literals
import os
import sys
import warnings
from argparse import ArgumentParser
from optparse import OptionParser
import django
from django.core import checks
from django.core.management.color import color_style, no_style
from django.db import connections
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_str
class CommandError(Exception):
"""
Exception class indicating a problem while executing a management
command.
If this exception is raised during the execution of a management
command, it will be caught and turned into a nicely-printed error
message to the appropriate output stream (i.e., stderr); as a
result, raising this exception (with a sensible description of the
error) is the preferred way to indicate that something has gone
wrong in the execution of a command.
"""
pass
class SystemCheckError(CommandError):
"""
The system check framework detected unrecoverable errors.
"""
pass
class CommandParser(ArgumentParser):
"""
Customized ArgumentParser class to improve some error messages and prevent
SystemExit in several occasions, as SystemExit is unacceptable when a
command is called programmatically.
"""
def __init__(self, cmd, **kwargs):
self.cmd = cmd
super(CommandParser, self).__init__(**kwargs)
def parse_args(self, args=None, namespace=None):
# Catch missing argument for a better error message
if (hasattr(self.cmd, 'missing_args_message') and
not (args or any(not arg.startswith('-') for arg in args))):
self.error(self.cmd.missing_args_message)
return super(CommandParser, self).parse_args(args, namespace)
def error(self, message):
if self.cmd._called_from_command_line:
super(CommandParser, self).error(message)
else:
raise CommandError("Error: %s" % message)
def handle_default_options(options):
"""
Include any default options that all commands should accept here
so that ManagementUtility can handle them before searching for
user commands.
"""
if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
if options.pythonpath:
sys.path.insert(0, options.pythonpath)
class OutputWrapper(object):
"""
Wrapper around stdout/stderr
"""
@property
def style_func(self):
return self._style_func
@style_func.setter
def style_func(self, style_func):
if style_func and self.isatty():
self._style_func = style_func
else:
self._style_func = lambda x: x
def __init__(self, out, style_func=None, ending='\n'):
self._out = out
self.style_func = None
self.ending = ending
def __getattr__(self, name):
return getattr(self._out, name)
def isatty(self):
return hasattr(self._out, 'isatty') and self._out.isatty()
def write(self, msg, style_func=None, ending=None):
ending = self.ending if ending is None else ending
if ending and not msg.endswith(ending):
msg += ending
style_func = style_func or self.style_func
self._out.write(force_str(style_func(msg)))
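# Illustrative sketch (not part of Django itself): OutputWrapper only applies
# style_func when the wrapped stream is a tty, so redirected output stays
# plain. color_style is already imported at the top of this module.
#
# out = OutputWrapper(sys.stdout)
# out.style_func = color_style().SUCCESS # no-op unless sys.stdout.isatty()
# out.write('done') # the '\n' ending is appended automatically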
class BaseCommand(object):
"""
The base class from which all management commands ultimately
derive.
Use this class if you want access to all of the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``django-admin`` or ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``ArgumentParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``args``
A string listing the arguments accepted by the command,
suitable for use in help messages; e.g., a command which takes
a list of application names might set this to '<app_label
app_label ...>'.
``can_import_settings``
A boolean indicating whether the command needs to be able to
import Django settings; if ``True``, ``execute()`` will verify
that this is possible before proceeding. Default value is
``True``.
``help``
A short description of the command, which will be printed in
help messages.
``option_list``
This is the list of ``optparse`` options which will be fed
into the command's ``OptionParser`` for parsing arguments.
Deprecated and will be removed in Django 1.10.
``output_transaction``
A boolean indicating whether the command outputs SQL
statements; if ``True``, the output will automatically be
wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is
``False``.
``requires_system_checks``
A boolean; if ``True``, entire Django project will be checked for errors
prior to executing the command. Default value is ``True``.
To validate an individual application's models
rather than all applications' models, call
``self.check(app_configs)`` from ``handle()``, where ``app_configs``
is the list of application's configuration provided by the
app registry.
``leave_locale_alone``
A boolean indicating whether the locale set in settings should be
preserved during the execution of the command instead of translations
being deactivated.
Default value is ``False``.
Make sure you know what you are doing if you decide to change the value
of this option in your custom command if it creates database content
that is locale-sensitive and such content shouldn't contain any
translations (like it happens e.g. with django.contrib.auth
permissions) as activating any locale might cause unintended effects.
This option can't be False when the can_import_settings option is set
to False too because attempting to deactivate translations needs access
to settings. This condition will generate a CommandError.
"""
# Metadata about this command.
option_list = ()
help = ''
args = ''
# Configuration shortcuts that alter various logic.
_called_from_command_line = False
can_import_settings = True
output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;"
leave_locale_alone = False
requires_system_checks = True
def __init__(self, stdout=None, stderr=None, no_color=False):
self.stdout = OutputWrapper(stdout or sys.stdout)
self.stderr = OutputWrapper(stderr or sys.stderr)
if no_color:
self.style = no_style()
else:
self.style = color_style()
self.stderr.style_func = self.style.ERROR
@property
def use_argparse(self):
return not bool(self.option_list)
def get_version(self):
"""
Return the Django version, which should be correct for all
built-in Django commands. User-supplied commands should
override this method.
"""
return django.get_version()
def usage(self, subcommand):
"""
Return a brief description of how to use this command, by
default from the attribute ``self.help``.
"""
usage = '%%prog %s [options] %s' % (subcommand, self.args)
if self.help:
return '%s\n\n%s' % (usage, self.help)
else:
return usage
def create_parser(self, prog_name, subcommand):
"""
Create and return the ``ArgumentParser`` which will be used to
parse the arguments to this command.
"""
if not self.use_argparse:
def store_as_int(option, opt_str, value, parser):
setattr(parser.values, option.dest, int(value))
# Backwards compatibility: use deprecated optparse module
warnings.warn("OptionParser usage for Django management commands "
"is deprecated, use ArgumentParser instead",
RemovedInDjango110Warning)
parser = OptionParser(prog=prog_name,
usage=self.usage(subcommand),
version=self.get_version())
parser.add_option('-v', '--verbosity', action='callback', dest='verbosity', default=1,
type='choice', choices=['0', '1', '2', '3'], callback=store_as_int,
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
parser.add_option('--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_option('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
parser.add_option('--traceback', action='store_true',
help='Raise on CommandError exceptions')
parser.add_option('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output.")
for opt in self.option_list:
parser.add_option(opt)
else:
parser = CommandParser(self, prog="%s %s" % (os.path.basename(prog_name), subcommand),
description=self.help or None)
parser.add_argument('--version', action='version', version=self.get_version())
parser.add_argument('-v', '--verbosity', action='store', dest='verbosity', default='1',
type=int, choices=[0, 1, 2, 3],
help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output')
parser.add_argument('--settings',
help=(
'The Python path to a settings module, e.g. '
'"myproject.settings.main". If this isn\'t provided, the '
'DJANGO_SETTINGS_MODULE environment variable will be used.'
),
)
parser.add_argument('--pythonpath',
help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".')
parser.add_argument('--traceback', action='store_true',
help='Raise on CommandError exceptions')
parser.add_argument('--no-color', action='store_true', dest='no_color', default=False,
help="Don't colorize the command output.")
if self.args:
# Keep compatibility and always accept positional arguments, like optparse when args is set
parser.add_argument('args', nargs='*')
self.add_arguments(parser)
return parser
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
pass
def print_help(self, prog_name, subcommand):
"""
Print the help message for this command, derived from
``self.usage()``.
"""
parser = self.create_parser(prog_name, subcommand)
parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and Django settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
if self.use_argparse:
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop('args', ())
else:
options, args = parser.parse_args(argv[2:])
cmd_options = vars(options)
handle_default_options(options)
try:
self.execute(*args, **cmd_options)
except Exception as e:
if options.traceback or not isinstance(e, CommandError):
raise
# SystemCheckError takes care of its own formatting.
if isinstance(e, SystemCheckError):
self.stderr.write(str(e), lambda x: x)
else:
self.stderr.write('%s: %s' % (e.__class__.__name__, e))
sys.exit(1)
finally:
connections.close_all()
def execute(self, *args, **options):
"""
Try to execute this command, performing system checks if needed (as
controlled by the ``requires_system_checks`` attribute, except if
force-skipped).
"""
if options.get('no_color'):
self.style = no_style()
self.stderr.style_func = None
if options.get('stdout'):
self.stdout = OutputWrapper(options['stdout'])
if options.get('stderr'):
self.stderr = OutputWrapper(options.get('stderr'), self.stderr.style_func)
saved_locale = None
if not self.leave_locale_alone:
# Only mess with locales if we can assume we have a working
# settings file, because django.utils.translation requires settings
# (The final saying about whether the i18n machinery is active will be
# found in the value of the USE_I18N setting)
if not self.can_import_settings:
raise CommandError("Incompatible values of 'leave_locale_alone' "
"(%s) and 'can_import_settings' (%s) command "
"options." % (self.leave_locale_alone,
self.can_import_settings))
# Deactivate translations, because django-admin creates database
# content like permissions, and those shouldn't contain any
# translations.
from django.utils import translation
saved_locale = translation.get_language()
translation.deactivate_all()
try:
if (self.requires_system_checks and
not options.get('skip_validation') and # Remove at the end of deprecation for `skip_validation`.
not options.get('skip_checks')):
self.check()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()))
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD(connection.ops.end_transaction_sql()))
finally:
if saved_locale is not None:
translation.activate(saved_locale)
def check(self, app_configs=None, tags=None, display_num_errors=False,
include_deployment_checks=False):
"""
Uses the system check framework to validate entire Django project.
Raises CommandError for any serious message (error or critical errors).
If there are only light messages (like warnings), they are printed to
stderr and no exception is raised.
"""
all_issues = checks.run_checks(
app_configs=app_configs,
tags=tags,
include_deployment_checks=include_deployment_checks,
)
header, body, footer = "", "", ""
visible_issue_count = 0 # excludes silenced warnings
if all_issues:
debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()]
infos = [e for e in all_issues if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()]
warnings = [e for e in all_issues if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()]
errors = [e for e in all_issues if checks.ERROR <= e.level < checks.CRITICAL]
criticals = [e for e in all_issues if checks.CRITICAL <= e.level]
sorted_issues = [
(criticals, 'CRITICALS'),
(errors, 'ERRORS'),
(warnings, 'WARNINGS'),
(infos, 'INFOS'),
(debugs, 'DEBUGS'),
]
for issues, group_name in sorted_issues:
if issues:
visible_issue_count += len(issues)
formatted = (
self.style.ERROR(force_str(e))
if e.is_serious()
else self.style.WARNING(force_str(e))
for e in issues)
formatted = "\n".join(sorted(formatted))
body += '\n%s:\n%s\n' % (group_name, formatted)
if visible_issue_count:
header = "System check identified some issues:\n"
if display_num_errors:
if visible_issue_count:
footer += '\n'
footer += "System check identified %s (%s silenced)." % (
"no issues" if visible_issue_count == 0 else
"1 issue" if visible_issue_count == 1 else
"%s issues" % visible_issue_count,
len(all_issues) - visible_issue_count,
)
if any(e.is_serious() and not e.is_silenced() for e in all_issues):
msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
raise SystemCheckError(msg)
else:
msg = header + body + footer
if msg:
if visible_issue_count:
self.stderr.write(msg, lambda x: x)
else:
self.stdout.write(msg)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError('subclasses of BaseCommand must provide a handle() method')
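# Illustrative sketch (not part of Django itself): a minimal subclass wiring
# add_arguments() and handle() together as described in the BaseCommand
# docstring. The command and argument names are hypothetical; such a class
# would live in <app>/management/commands/closepoll.py. BaseCommand and
# CommandError are defined in this module.
#
# class Command(BaseCommand):
# help = 'Closes the specified poll for voting'
#
# def add_arguments(self, parser):
# parser.add_argument('poll_id', type=int)
#
# def handle(self, *args, **options):
# if options['poll_id'] < 1:
# raise CommandError('poll_id must be a positive integer')
# self.stdout.write('Closed poll %s' % options['poll_id'])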
class AppCommand(BaseCommand):
"""
A management command which takes one or more installed application labels
as arguments, and does something with each of them.
Rather than implementing ``handle()``, subclasses must implement
``handle_app_config()``, which will be called once for each application.
"""
missing_args_message = "Enter at least one application label."
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='+',
help='One or more application label.')
def handle(self, *app_labels, **options):
from django.apps import apps
try:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
except (LookupError, ImportError) as e:
raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e)
output = []
for app_config in app_configs:
app_output = self.handle_app_config(app_config, **options)
if app_output:
output.append(app_output)
return '\n'.join(output)
def handle_app_config(self, app_config, **options):
"""
Perform the command's actions for app_config, an AppConfig instance
corresponding to an application label given on the command line.
"""
raise NotImplementedError(
"Subclasses of AppCommand must provide"
"a handle_app_config() method.")
class LabelCommand(BaseCommand):
"""
A management command which takes one or more arbitrary arguments
(labels) on the command line, and does something with each of
them.
Rather than implementing ``handle()``, subclasses must implement
``handle_label()``, which will be called once for each label.
If the arguments should be names of installed applications, use
``AppCommand`` instead.
"""
label = 'label'
missing_args_message = "Enter at least one %s." % label
def add_arguments(self, parser):
parser.add_argument('args', metavar=self.label, nargs='+')
def handle(self, *labels, **options):
output = []
for label in labels:
label_output = self.handle_label(label, **options)
if label_output:
output.append(label_output)
return '\n'.join(output)
def handle_label(self, label, **options):
"""
Perform the command's actions for ``label``, which will be the
string as given on the command line.
"""
raise NotImplementedError('subclasses of LabelCommand must provide a handle_label() method')
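# Illustrative sketch (not part of Django itself): a LabelCommand subclass
# that processes each command-line label independently. The command name is
# hypothetical.
#
# class Command(LabelCommand):
# help = 'Pings the named services'
# label = 'service'
#
# def handle_label(self, label, **options):
# return 'pinged %s' % label # outputs are joined by handle()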
class NoArgsCommand(BaseCommand):
"""
A command which takes no arguments on the command line.
Rather than implementing ``handle()``, subclasses must implement
``handle_noargs()``; ``handle()`` itself is overridden to ensure
no arguments are passed to the command.
Attempting to pass arguments will raise ``CommandError``.
"""
args = ''
def __init__(self):
warnings.warn(
"NoArgsCommand class is deprecated and will be removed in Django 1.10. "
"Use BaseCommand instead, which takes no arguments by default.",
RemovedInDjango110Warning
)
super(NoArgsCommand, self).__init__()
def handle(self, *args, **options):
if args:
raise CommandError("Command doesn't accept any arguments")
return self.handle_noargs(**options)
def handle_noargs(self, **options):
"""
Perform this command's actions.
"""
raise NotImplementedError('subclasses of NoArgsCommand must provide a handle_noargs() method')
| bsd-3-clause |
mm112287/2015cda_g8 | static/Brython3.1.0-20150301-090019/Lib/shutil.py | 720 | 39101 | """Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
import tarfile
try:
import bz2
del bz2
_BZ2_SUPPORTED = True
except ImportError:
_BZ2_SUPPORTED = False
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format",
"get_unpack_formats", "register_unpack_format",
"unregister_unpack_format", "unpack_archive",
"ignore_patterns", "chown", "which"]
# disk_usage is added later, if available on the platform
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
class ReadError(EnvironmentError):
"""Raised when an archive cannot be read"""
class RegistryError(Exception):
"""Raised when a registry operation with the archiving
and unpacking registries fails"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst, *, follow_symlinks=True):
"""Copy data from src to dst.
If follow_symlinks is not set and src is a symbolic link, a new
symlink will be created instead of copying the file it points to.
"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
if not follow_symlinks and os.path.islink(src):
os.symlink(os.readlink(src), dst)
else:
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
return dst
def copymode(src, dst, *, follow_symlinks=True):
"""Copy mode bits from src to dst.
If follow_symlinks is not set, symlinks aren't followed if and only
if both `src` and `dst` are symlinks. If `lchmod` isn't available
(e.g. Linux) this method does nothing.
"""
if not follow_symlinks and os.path.islink(src) and os.path.islink(dst):
if hasattr(os, 'lchmod'):
stat_func, chmod_func = os.lstat, os.lchmod
else:
return
elif hasattr(os, 'chmod'):
stat_func, chmod_func = os.stat, os.chmod
else:
return
st = stat_func(src)
chmod_func(dst, stat.S_IMODE(st.st_mode))
if hasattr(os, 'listxattr'):
def _copyxattr(src, dst, *, follow_symlinks=True):
"""Copy extended filesystem attributes from `src` to `dst`.
Overwrite existing attributes.
If `follow_symlinks` is false, symlinks won't be followed.
"""
try:
names = os.listxattr(src, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.ENOTSUP, errno.ENODATA):
raise
return
for name in names:
try:
value = os.getxattr(src, name, follow_symlinks=follow_symlinks)
os.setxattr(dst, name, value, follow_symlinks=follow_symlinks)
except OSError as e:
if e.errno not in (errno.EPERM, errno.ENOTSUP, errno.ENODATA):
raise
else:
def _copyxattr(*args, **kwargs):
pass
def copystat(src, dst, *, follow_symlinks=True):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst.
If the optional flag `follow_symlinks` is not set, symlinks aren't followed if and
only if both `src` and `dst` are symlinks.
"""
def _nop(*args, ns=None, follow_symlinks=None):
pass
# follow symlinks (aka don't not follow symlinks)
follow = follow_symlinks or not (os.path.islink(src) and os.path.islink(dst))
if follow:
# use the real function if it exists
def lookup(name):
return getattr(os, name, _nop)
else:
# use the real function only if it exists
# *and* it supports follow_symlinks
def lookup(name):
fn = getattr(os, name, _nop)
if fn in os.supports_follow_symlinks:
return fn
return _nop
st = lookup("stat")(src, follow_symlinks=follow)
mode = stat.S_IMODE(st.st_mode)
lookup("utime")(dst, ns=(st.st_atime_ns, st.st_mtime_ns),
follow_symlinks=follow)
try:
lookup("chmod")(dst, mode, follow_symlinks=follow)
except NotImplementedError:
# if we got a NotImplementedError, it's because
# * follow_symlinks=False,
# * lchmod() is unavailable, and
# * either
# * fchmodat() is unavailable or
# * fchmodat() doesn't implement AT_SYMLINK_NOFOLLOW.
# (it returned ENOTSUP.)
# therefore we're out of options--we simply cannot chmod the
# symlink. give up, suppress the error.
# (which is what shutil always did in this circumstance.)
pass
if hasattr(st, 'st_flags'):
try:
lookup("chflags")(dst, st.st_flags, follow_symlinks=follow)
except OSError as why:
for err in 'EOPNOTSUPP', 'ENOTSUP':
if hasattr(errno, err) and why.errno == getattr(errno, err):
break
else:
raise
_copyxattr(src, dst, follow_symlinks=follow)
def copy(src, dst, *, follow_symlinks=True):
"""Copy data and mode bits ("cp src dst"). Return the file's destination.
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copymode(src, dst, follow_symlinks=follow_symlinks)
return dst
def copy2(src, dst, *, follow_symlinks=True):
"""Copy data and all stat info ("cp -p src dst"). Return the file's
destination."
The destination may be a directory.
If follow_symlinks is false, symlinks won't be followed. This
resembles GNU's "cp -P src dst".
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst, follow_symlinks=follow_symlinks)
copystat(src, dst, follow_symlinks=follow_symlinks)
return dst
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2,
ignore_dangling_symlinks=False):
"""Recursively copy a directory tree.
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
copystat(srcname, dstname, follow_symlinks=not symlinks)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
                    # otherwise let the copy occur; copy_function will raise an error
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
return dst
# version vulnerable to race conditions
def _rmtree_unsafe(path, onerror):
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
_rmtree_unsafe(fullname, onerror)
else:
try:
os.unlink(fullname)
except os.error:
onerror(os.unlink, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
# Version using fd-based APIs to protect against races
def _rmtree_safe_fd(topfd, path, onerror):
names = []
try:
names = os.listdir(topfd)
except OSError as err:
err.filename = path
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
orig_st = os.stat(name, dir_fd=topfd, follow_symlinks=False)
mode = orig_st.st_mode
except OSError:
mode = 0
if stat.S_ISDIR(mode):
try:
dirfd = os.open(name, os.O_RDONLY, dir_fd=topfd)
except OSError:
onerror(os.open, fullname, sys.exc_info())
else:
try:
if os.path.samestat(orig_st, os.fstat(dirfd)):
_rmtree_safe_fd(dirfd, fullname, onerror)
try:
os.rmdir(name, dir_fd=topfd)
except OSError:
onerror(os.rmdir, fullname, sys.exc_info())
else:
try:
# This can only happen if someone replaces
# a directory with a symlink after the call to
# stat.S_ISDIR above.
raise OSError("Cannot call rmtree on a symbolic "
"link")
except OSError:
onerror(os.path.islink, fullname, sys.exc_info())
finally:
os.close(dirfd)
else:
try:
os.unlink(name, dir_fd=topfd)
except OSError:
onerror(os.unlink, fullname, sys.exc_info())
_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
os.supports_dir_fd and
os.listdir in os.supports_fd and
os.stat in os.supports_follow_symlinks)
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is platform and implementation dependent;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
if _use_fd_functions:
        # While the unsafe rmtree works fine on bytes, the fd-based one does not.
if isinstance(path, bytes):
path = os.fsdecode(path)
# Note: To guard against symlink races, we use the standard
# lstat()/open()/fstat() trick.
try:
orig_st = os.lstat(path)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
fd = os.open(path, os.O_RDONLY)
except Exception:
onerror(os.lstat, path, sys.exc_info())
return
try:
if os.path.samestat(orig_st, os.fstat(fd)):
_rmtree_safe_fd(fd, path, onerror)
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
else:
try:
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
finally:
os.close(fd)
else:
return _rmtree_unsafe(path, onerror)
# Allow introspection of whether or not the hardening against symlink
# attacks is supported on the current platform
rmtree.avoids_symlink_attacks = _use_fd_functions
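# --- Editor's illustrative sketch (not part of shutil) ----------------------
# The onerror protocol: rmtree() calls the hook with (func, path, exc_info)
# and keeps going, so a handler can collect failures instead of aborting.
def _demo_rmtree_onerror(path):
    failures = []
    def handler(func, failed_path, exc_info):
        failures.append((func.__name__, failed_path, exc_info[1]))
    rmtree(path, onerror=handler)
    return failures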
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command. Return the file or directory's
destination.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed. Symlinks are
recreated under the new name if os.rename() fails because of cross
filesystem renames.
    A lot more could be done here...  A look at mv.c shows a lot of
    the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error("Destination path '%s' already exists" % real_dst)
try:
os.rename(src, real_dst)
except OSError:
if os.path.islink(src):
linkto = os.readlink(src)
os.symlink(linkto, real_dst)
os.unlink(src)
elif os.path.isdir(src):
if _destinsrc(src, dst):
raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst))
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
return real_dst
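# --- Editor's illustrative sketch (not part of shutil) ----------------------
# move() renames when src and dst share a filesystem and falls back to
# copy-then-delete otherwise; moving into an existing directory nests the
# source under it and refuses to overwrite.
def _demo_move(src, dstdir):
    try:
        return move(src, dstdir)      # returns the final destination path
    except Error as why:
        return str(why)               # e.g. destination already exists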
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', None: ''}
compress_ext = {'gzip': '.gz'}
if _BZ2_SUPPORTED:
tar_compression['bzip2'] = 'bz2'
compress_ext['bzip2'] = '.bz2'
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext:
raise ValueError("bad value for 'compress', or compression format not "
"supported : {0}".format(compress))
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
        raise ExecError("unable to create zip file '%s': "
                        "could neither import the 'zipfile' module nor "
                        "find a standalone zip utility" % zip_filename)
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')],
"bzip2'ed tar-file")
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not callable(function):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
if not isinstance(element, (tuple, list)) or len(element) !=2:
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
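# --- Editor's illustrative sketch (not part of shutil) ----------------------
# A custom archiver only needs the (base_name, base_dir, **kwargs) shape.
# The 'xztar' name and the use of tarfile's 'w:xz' mode (Python 3.3+) are
# assumptions for illustration, not part of this module.
def _demo_register_xztar():
    def _make_xztar(base_name, base_dir, dry_run=0, logger=None, **kwargs):
        name = base_name + '.tar.xz'
        if not dry_run:
            with tarfile.open(name, 'w:xz') as tar:
                tar.add(base_dir)
        return name
    register_archive_format('xztar', _make_xztar, [], "xz'ed tar-file")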
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError("unknown archive format '%s'" % format)
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
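# --- Editor's illustrative sketch (not part of shutil) ----------------------
# Typical call: pack the contents of /srv/app as app-1.0.tar.gz without
# embedding the absolute /srv prefix. Paths are hypothetical.
def _demo_make_archive():
    return make_archive('app-1.0', 'gztar', root_dir='/srv', base_dir='app')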
def get_unpack_formats():
"""Returns a list of supported formats for unpacking.
Each element of the returned sequence is a tuple
(name, extensions, description)
"""
formats = [(name, info[0], info[3]) for name, info in
_UNPACK_FORMATS.items()]
formats.sort()
return formats
def _check_unpack_options(extensions, function, extra_args):
"""Checks what gets registered as an unpacker."""
# first make sure no other unpacker is registered for this extension
existing_extensions = {}
for name, info in _UNPACK_FORMATS.items():
for ext in info[0]:
existing_extensions[ext] = name
for extension in extensions:
if extension in existing_extensions:
msg = '%s is already registered for "%s"'
raise RegistryError(msg % (extension,
existing_extensions[extension]))
if not callable(function):
raise TypeError('The registered function must be a callable')
def register_unpack_format(name, extensions, function, extra_args=None,
description=''):
"""Registers an unpack format.
`name` is the name of the format. `extensions` is a list of extensions
corresponding to the format.
`function` is the callable that will be
used to unpack archives. The callable will receive archives to unpack.
If it's unable to handle an archive, it needs to raise a ReadError
exception.
If provided, `extra_args` is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_unpack_formats() function.
"""
if extra_args is None:
extra_args = []
_check_unpack_options(extensions, function, extra_args)
_UNPACK_FORMATS[name] = extensions, function, extra_args, description
def unregister_unpack_format(name):
"""Removes the pack format from the registery."""
del _UNPACK_FORMATS[name]
def _ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def _unpack_zipfile(filename, extract_dir):
"""Unpack zip `filename` to `extract_dir`
"""
try:
import zipfile
except ImportError:
raise ReadError('zlib not supported, cannot unpack this archive.')
if not zipfile.is_zipfile(filename):
raise ReadError("%s is not a zip file" % filename)
zip = zipfile.ZipFile(filename)
try:
for info in zip.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name:
continue
target = os.path.join(extract_dir, *name.split('/'))
if not target:
continue
_ensure_directory(target)
if not name.endswith('/'):
# file
data = zip.read(info.filename)
f = open(target, 'wb')
try:
f.write(data)
finally:
f.close()
del data
finally:
zip.close()
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
_UNPACK_FORMATS = {
'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"),
'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"),
'zip': (['.zip'], _unpack_zipfile, [], "ZIP file")
}
if _BZ2_SUPPORTED:
_UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [],
"bzip2'ed tar-file")
def _find_unpack_format(filename):
for name, info in _UNPACK_FORMATS.items():
for extension in info[0]:
if filename.endswith(extension):
return name
return None
def unpack_archive(filename, extract_dir=None, format=None):
"""Unpack an archive.
`filename` is the name of the archive.
`extract_dir` is the name of the target directory, where the archive
is unpacked. If not provided, the current working directory is used.
`format` is the archive format: one of "zip", "tar", or "gztar". Or any
other registered format. If not provided, unpack_archive will use the
filename extension and see if an unpacker was registered for that
extension.
In case none is found, a ValueError is raised.
"""
if extract_dir is None:
extract_dir = os.getcwd()
if format is not None:
try:
format_info = _UNPACK_FORMATS[format]
except KeyError:
raise ValueError("Unknown unpack format '{0}'".format(format))
func = format_info[1]
func(filename, extract_dir, **dict(format_info[2]))
else:
# we need to look at the registered unpackers supported extensions
format = _find_unpack_format(filename)
if format is None:
raise ReadError("Unknown archive format '{0}'".format(filename))
func = _UNPACK_FORMATS[format][1]
kwargs = dict(_UNPACK_FORMATS[format][2])
func(filename, extract_dir, **kwargs)
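# --- Editor's illustrative sketch (not part of shutil) ----------------------
# With format=None the dispatch is purely extension-based, so 'data.tgz'
# is routed to the 'gztar' unpacker registered in _UNPACK_FORMATS above.
def _demo_unpack_archive(archive, target):
    unpack_archive(archive, extract_dir=target)                  # by extension
    unpack_archive(archive, extract_dir=target, format='gztar')  # explicit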
if hasattr(os, 'statvfs'):
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned value is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
elif os.name == 'nt':
import nt
__all__.append('disk_usage')
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
def disk_usage(path):
"""Return disk usage statistics about the given path.
Returned values is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
"""
total, free = nt._getdiskusage(path)
used = total - free
return _ntuple_diskusage(total, used, free)
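# --- Editor's illustrative sketch (not part of shutil) ----------------------
# Where disk_usage() is available, a human-readable "percent used" figure
# is a one-liner on top of the named tuple.
def _demo_disk_usage(path='/'):
    usage = disk_usage(path)
    return round(100.0 * usage.used / usage.total, 1)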
def chown(path, user=None, group=None):
"""Change owner user and group of the given path.
user and group can be the uid/gid or the user/group names, and in that case,
they are converted to their respective uid/gid.
"""
if user is None and group is None:
raise ValueError("user and/or group must be set")
_user = user
_group = group
# -1 means don't change it
if user is None:
_user = -1
# user can either be an int (the uid) or a string (the system username)
elif isinstance(user, str):
_user = _get_uid(user)
if _user is None:
raise LookupError("no such user: {!r}".format(user))
if group is None:
_group = -1
elif not isinstance(group, int):
_group = _get_gid(group)
if _group is None:
raise LookupError("no such group: {!r}".format(group))
os.chown(path, _user, _group)
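# --- Editor's illustrative sketch (not part of shutil) ----------------------
# Names are resolved to ids via _get_uid()/_get_gid(); passing only one of
# user/group leaves the other unchanged (-1). Values are hypothetical.
def _demo_chown(path):
    chown(path, user='www-data')             # group untouched
    chown(path, user=33, group='www-data')   # uid directly, gid by name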
def get_terminal_size(fallback=(80, 24)):
"""Get the size of the terminal window.
For each of the two dimensions, the environment variable, COLUMNS
and LINES respectively, is checked. If the variable is defined and
the value is a positive integer, it is used.
When COLUMNS or LINES is not defined, which is the common case,
the terminal connected to sys.__stdout__ is queried
by invoking os.get_terminal_size.
If the terminal size cannot be successfully queried, either because
the system doesn't support querying, or because we are not
connected to a terminal, the value given in fallback parameter
is used. Fallback defaults to (80, 24) which is the default
size used by many terminal emulators.
The value returned is a named tuple of type os.terminal_size.
"""
# columns, lines are the working values
try:
columns = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
columns = 0
try:
lines = int(os.environ['LINES'])
except (KeyError, ValueError):
lines = 0
# only query if necessary
if columns <= 0 or lines <= 0:
try:
size = os.get_terminal_size(sys.__stdout__.fileno())
except (NameError, OSError):
size = os.terminal_size(fallback)
if columns <= 0:
columns = size.columns
if lines <= 0:
lines = size.lines
return os.terminal_size((columns, lines))
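# --- Editor's illustrative sketch (not part of shutil) ----------------------
# The result is an os.terminal_size named tuple, so both attribute and
# index access work.
def _demo_terminal_size():
    size = get_terminal_size(fallback=(100, 30))
    return size.columns, size[1]    # (columns, lines)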
def which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if not os.curdir in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if not normdir in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
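# --- Editor's illustrative sketch (not part of shutil) ----------------------
# which() returns the first match on PATH or None; mode and path can be
# overridden, e.g. to search a fixed directory for readable files.
def _demo_which():
    return (which('python3'),
            which('profile', mode=os.F_OK | os.R_OK, path='/etc'))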
| gpl-3.0 |
mlaitinen/odoo | addons/auth_ldap/users_ldap.py | 133 | 10812 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ldap
import logging
from ldap.filter import filter_format
import openerp.exceptions
from openerp import tools
from openerp.osv import fields, osv
from openerp import SUPERUSER_ID
from openerp.modules.registry import RegistryManager
_logger = logging.getLogger(__name__)
class CompanyLDAP(osv.osv):
_name = 'res.company.ldap'
_order = 'sequence'
_rec_name = 'ldap_server'
def get_ldap_dicts(self, cr, ids=None):
"""
Retrieve res_company_ldap resources from the database in dictionary
format.
:param list ids: Valid ids of model res_company_ldap. If not \
specified, process all resources (unlike other ORM methods).
:return: ldap configurations
:rtype: list of dictionaries
"""
if ids:
id_clause = 'AND id IN (%s)'
args = [tuple(ids)]
else:
id_clause = ''
args = []
cr.execute("""
SELECT id, company, ldap_server, ldap_server_port, ldap_binddn,
ldap_password, ldap_filter, ldap_base, "user", create_user,
ldap_tls
FROM res_company_ldap
WHERE ldap_server != '' """ + id_clause + """ ORDER BY sequence
""", args)
return cr.dictfetchall()
def connect(self, conf):
"""
Connect to an LDAP server specified by an ldap
configuration dictionary.
:param dict conf: LDAP configuration
:return: an LDAP object
"""
uri = 'ldap://%s:%d' % (conf['ldap_server'],
conf['ldap_server_port'])
connection = ldap.initialize(uri)
if conf['ldap_tls']:
connection.start_tls_s()
return connection
def authenticate(self, conf, login, password):
"""
Authenticate a user against the specified LDAP server.
In order to prevent an unintended 'unauthenticated authentication',
which is an anonymous bind with a valid dn and a blank password,
        check for empty passwords explicitly (:rfc:`4513#section-6.3.1`)
:param dict conf: LDAP configuration
:param login: username
:param password: Password for the LDAP user
:return: LDAP entry of authenticated user or False
:rtype: dictionary of attributes
"""
if not password:
return False
entry = False
filter = filter_format(conf['ldap_filter'], (login,))
try:
results = self.query(conf, filter)
# Get rid of (None, attrs) for searchResultReference replies
results = [i for i in results if i[0]]
if results and len(results) == 1:
dn = results[0][0]
conn = self.connect(conf)
conn.simple_bind_s(dn, password.encode('utf-8'))
conn.unbind()
entry = results[0]
except ldap.INVALID_CREDENTIALS:
return False
except ldap.LDAPError, e:
_logger.error('An LDAP exception occurred: %s', e)
return entry
def query(self, conf, filter, retrieve_attributes=None):
"""
Query an LDAP server with the filter argument and scope subtree.
Allow for all authentication methods of the simple authentication
method:
- authenticated bind (non-empty binddn + valid password)
- anonymous bind (empty binddn + empty password)
- unauthenticated authentication (non-empty binddn + empty password)
.. seealso::
:rfc:`4513#section-5.1` - LDAP: Simple Authentication Method.
:param dict conf: LDAP configuration
:param filter: valid LDAP filter
:param list retrieve_attributes: LDAP attributes to be retrieved. \
If not specified, return all attributes.
:return: ldap entries
:rtype: list of tuples (dn, attrs)
"""
results = []
try:
conn = self.connect(conf)
ldap_password = conf['ldap_password'] or ''
ldap_binddn = conf['ldap_binddn'] or ''
conn.simple_bind_s(ldap_binddn.encode('utf-8'), ldap_password.encode('utf-8'))
results = conn.search_st(conf['ldap_base'], ldap.SCOPE_SUBTREE,
filter, retrieve_attributes, timeout=60)
conn.unbind()
except ldap.INVALID_CREDENTIALS:
_logger.error('LDAP bind failed.')
except ldap.LDAPError, e:
_logger.error('An LDAP exception occurred: %s', e)
return results
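    # --- Editor's illustrative sketch (not part of the original module) ----
    # Shape of the configuration dict consumed by connect(), query() and
    # authenticate(), mirroring the columns selected in get_ldap_dicts().
    # All values are hypothetical.
    _EXAMPLE_CONF = {
        'ldap_server': 'ldap.example.com',
        'ldap_server_port': 389,
        'ldap_binddn': 'cn=admin,dc=example,dc=com',
        'ldap_password': 'secret',
        'ldap_filter': '(uid=%s)',
        'ldap_base': 'ou=people,dc=example,dc=com',
        'ldap_tls': False,
        'user': False,            # optional template res.users id
        'create_user': True,
        'company': 1,
    }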
def map_ldap_attributes(self, cr, uid, conf, login, ldap_entry):
"""
Compose values for a new resource of model res_users,
based upon the retrieved ldap entry and the LDAP settings.
:param dict conf: LDAP configuration
:param login: the new user's login
:param tuple ldap_entry: single LDAP result (dn, attrs)
:return: parameters for a new resource of model res_users
:rtype: dict
"""
values = { 'name': ldap_entry[1]['cn'][0],
'login': login,
'company_id': conf['company']
}
return values
def get_or_create_user(self, cr, uid, conf, login, ldap_entry,
context=None):
"""
Retrieve an active resource of model res_users with the specified
login. Create the user if it is not initially found.
:param dict conf: LDAP configuration
:param login: the user's login
:param tuple ldap_entry: single LDAP result (dn, attrs)
:return: res_users id
:rtype: int
"""
user_id = False
login = tools.ustr(login.lower().strip())
cr.execute("SELECT id, active FROM res_users WHERE lower(login)=%s", (login,))
res = cr.fetchone()
if res:
if res[1]:
user_id = res[0]
elif conf['create_user']:
_logger.debug("Creating new Odoo user \"%s\" from LDAP" % login)
user_obj = self.pool['res.users']
values = self.map_ldap_attributes(cr, uid, conf, login, ldap_entry)
if conf['user']:
values['active'] = True
user_id = user_obj.copy(cr, SUPERUSER_ID, conf['user'],
default=values)
else:
user_id = user_obj.create(cr, SUPERUSER_ID, values)
return user_id
_columns = {
'sequence': fields.integer('Sequence'),
'company': fields.many2one('res.company', 'Company', required=True,
ondelete='cascade'),
'ldap_server': fields.char('LDAP Server address', required=True),
'ldap_server_port': fields.integer('LDAP Server port', required=True),
'ldap_binddn': fields.char('LDAP binddn',
help=("The user account on the LDAP server that is used to query "
"the directory. Leave empty to connect anonymously.")),
'ldap_password': fields.char('LDAP password',
help=("The password of the user account on the LDAP server that is "
"used to query the directory.")),
'ldap_filter': fields.char('LDAP filter', required=True),
'ldap_base': fields.char('LDAP base', required=True),
'user': fields.many2one('res.users', 'Template User',
help="User to copy when creating new users"),
'create_user': fields.boolean('Create user',
help="Automatically create local user accounts for new users authenticating via LDAP"),
'ldap_tls': fields.boolean('Use TLS',
help="Request secure TLS/SSL encryption when connecting to the LDAP server. "
"This option requires a server with STARTTLS enabled, "
"otherwise all authentication attempts will fail."),
}
_defaults = {
'ldap_server': '127.0.0.1',
'ldap_server_port': 389,
'sequence': 10,
'create_user': True,
}
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'ldaps': fields.one2many(
'res.company.ldap', 'company', 'LDAP Parameters', copy=True, groups="base.group_system"),
}
class users(osv.osv):
_inherit = "res.users"
def _login(self, db, login, password):
user_id = super(users, self)._login(db, login, password)
if user_id:
return user_id
registry = RegistryManager.get(db)
with registry.cursor() as cr:
cr.execute("SELECT id FROM res_users WHERE lower(login)=%s", (login,))
res = cr.fetchone()
if res:
return False
ldap_obj = registry.get('res.company.ldap')
for conf in ldap_obj.get_ldap_dicts(cr):
entry = ldap_obj.authenticate(conf, login, password)
if entry:
user_id = ldap_obj.get_or_create_user(
cr, SUPERUSER_ID, conf, login, entry)
if user_id:
break
return user_id
def check_credentials(self, cr, uid, password):
try:
super(users, self).check_credentials(cr, uid, password)
except openerp.exceptions.AccessDenied:
cr.execute('SELECT login FROM res_users WHERE id=%s AND active=TRUE',
(int(uid),))
res = cr.fetchone()
if res:
ldap_obj = self.pool['res.company.ldap']
for conf in ldap_obj.get_ldap_dicts(cr):
if ldap_obj.authenticate(conf, res[0], password):
return
raise
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gonicus/gosa | backend/src/gosa/backend/plugins/samba/logonhours.py | 1 | 2755 | # This file is part of the GOsa framework.
#
# http://gosa-project.org
#
# Copyright:
# (C) 2016 GONICUS GmbH, Germany, http://www.gonicus.de
#
# See the LICENSE file in the project's top-level directory for details.
import time
from gosa.backend.objects.types import AttributeType
class SambaLogonHoursAttribute(AttributeType):
"""
This is a special object-attribute-type for sambaLogonHours.
    This class can convert sambaLogonHours to a UnicodeString and vice versa.
It is used in the samba-object definition file.
"""
__alias__ = "SambaLogonHours"
def values_match(self, value1, value2):
return str(value1) == str(value2)
def is_valid_value(self, value):
if len(value):
try:
# Check if each week day contains 24 values.
if type(value[0]) is not str or len(value[0]) != 168 or len(set(value[0]) - set('01')):
return False
return True
except:
return False
def _convert_to_unicodestring(self, value):
"""
        This method is a converter used when values get read from or written to the backend.
Converts the 'SambaLogonHours' object-type into a 'UnicodeString'-object.
"""
if len(value):
# Combine the binary strings
lstr = value[0]
            # Now reverse every 8-bit part and rejoin the high and low nibbles (4 bits)
new = ""
for i in range(0, 21):
n = lstr[i * 8:((i + 1) * 8)]
n = n[0:4] + n[4:]
n = n[::-1]
n = str(hex(int(n, 2)))[2::].rjust(2, '0')
new += n
value = [new.upper()]
return value
def _convert_from_string(self, value):
return self._convert_from_unicodestring(value)
def _convert_from_unicodestring(self, value):
"""
        This method is a converter used when values get read from or written to the backend.
Converts a 'UnicodeString' attribute into the 'SambaLogonHours' object-type.
"""
if len(value):
# Convert each hex-pair into binary values.
# Then reverse the binary result and switch high and low pairs.
value = value[0]
lstr = ""
for i in range(0, 42, 2):
n = (bin(int(value[i:i + 2], 16))[2::]).rjust(8, '0')
n = n[::-1]
lstr += n[0:4] + n[4:]
            # Shift lstr by the timezone offset
shift_by = int((168 + (time.timezone/3600)) % 168)
lstr = lstr[shift_by:] + lstr[:shift_by]
# Parse result into more readable value
value = [lstr]
return value
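# --- Editor's illustrative sketch (not part of this plugin) -----------------
# Round trip of the two converters: 42 hex digits <-> 168 bits (7 days x
# 24 hours). "FF" * 21 means "logon allowed at every hour". Assumes the
# AttributeType base class needs no constructor arguments.
def _demo_logon_hours_roundtrip():
    attr = SambaLogonHoursAttribute()
    bits = attr._convert_from_unicodestring(['FF' * 21])   # -> ['1' * 168]
    return attr._convert_to_unicodestring(bits)            # -> ['FF' * 21]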
| lgpl-2.1 |
JavaRabbit/CS496_capstone | appengine/standard/Capstone_inPython/reportlab/graphics/testshapes.py | 3 | 17300 | #!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/testshapes.py
# testshapes.py - draws shapes onto a PDF canvas.
__version__ = ''' $Id $ '''
__doc__='''Execute this script to see some test drawings.
This contains a number of routines to generate test drawings
for reportlab/graphics. For now many of them are contrived,
but we will expand them to try and trip up any parser.
Feel free to add more.
'''
import os, sys, base64
from reportlab.lib import colors
from reportlab.lib.units import cm
from reportlab.lib.utils import asNative
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.platypus import Flowable
from reportlab.graphics.shapes import *
from reportlab.graphics.renderPDF import _PDFRenderer
import unittest
_FONTS = ['Times-Roman','Vera','Times-BoldItalic',]
def _setup():
from reportlab.pdfbase import pdfmetrics, ttfonts
pdfmetrics.registerFont(ttfonts.TTFont("Vera", "Vera.ttf"))
pdfmetrics.registerFont(ttfonts.TTFont("VeraBd", "VeraBd.ttf"))
pdfmetrics.registerFont(ttfonts.TTFont("VeraIt", "VeraIt.ttf"))
pdfmetrics.registerFont(ttfonts.TTFont("VeraBI", "VeraBI.ttf"))
F = ['Times-Roman','Courier','Helvetica','Vera', 'VeraBd', 'VeraIt', 'VeraBI']
if sys.platform=='win32':
for name, ttf in [
('Adventurer Light SF','Advlit.ttf'),('ArialMS','ARIAL.TTF'),
('Arial Unicode MS', 'ARIALUNI.TTF'),
('Book Antiqua','BKANT.TTF'),
('Century Gothic','GOTHIC.TTF'),
('Comic Sans MS', 'COMIC.TTF'),
('Elementary Heavy SF Bold','Vwagh.ttf'),
('Firenze SF','flot.ttf'),
('Garamond','GARA.TTF'),
('Jagger','Rols.ttf'),
('Monotype Corsiva','MTCORSVA.TTF'),
('Seabird SF','seag.ttf'),
('Tahoma','TAHOMA.TTF'),
('VerdanaMS','VERDANA.TTF'),
]:
for D in (r'c:\WINNT',r'c:\Windows'):
fn = os.path.join(D,'Fonts',ttf)
if os.path.isfile(fn):
try:
f = ttfonts.TTFont(name, fn)
pdfmetrics.registerFont(f)
F.append(name)
except:
pass
return F
def resetFonts():
for f in _setup():
if f not in _FONTS:
_FONTS.append(f)
from reportlab.rl_config import register_reset
register_reset(resetFonts)
resetFonts()
#########################################################
#
# Collections of shape drawings.
#
#########################################################
def getFailedDrawing(funcName):
"""Generate a drawing in case something goes really wrong.
This will create a drawing to be displayed whenever some
other drawing could not be executed, because the generating
function does something terribly wrong! The box contains
an attention triangle, plus some error message.
"""
D = Drawing(400, 200)
points = [200,170, 140,80, 260,80]
D.add(Polygon(points,
strokeWidth=0.5*cm,
strokeColor=colors.red,
fillColor=colors.yellow))
s = String(200, 40,
"Error in generating function '%s'!" % funcName,
textAnchor='middle')
D.add(s)
return D
# These are the real drawings to be eye-balled.
def getDrawing01():
"""Hello World, on a rectangular background.
The rectangle's fillColor is yellow.
The string's fillColor is red.
"""
D = Drawing(400, 200)
D.add(Rect(50, 50, 300, 100, fillColor=colors.yellow))
D.add(String(180,100, 'Hello World', fillColor=colors.red))
D.add(String(180,86, b'Special characters \xc2\xa2\xc2\xa9\xc2\xae\xc2\xa3\xce\xb1\xce\xb2', fillColor=colors.red))
return D
def getDrawing02():
"""Various Line shapes.
The lines are blue and their strokeWidth is 5 mm.
One line has a strokeDashArray set to [5, 10, 15].
"""
D = Drawing(400, 200)
D.add(Line(50,50, 300,100,
strokeColor=colors.blue,
strokeWidth=0.5*cm,
))
D.add(Line(50,100, 300,50,
strokeColor=colors.blue,
strokeWidth=0.5*cm,
strokeDashArray=[5, 10, 15],
))
#x = 1/0 # Comment this to see the actual drawing!
return D
def getDrawing03():
"""Text strings in various sizes and different fonts.
Font size increases from 12 to 36 and from bottom left
to upper right corner. The first ones should be in
    Times-Roman. Finally, a solitary Vera string at
the top right corner.
"""
D = Drawing(400, 200)
for size in range(12, 36, 4):
D.add(String(10+size*2,
10+size*2,
'Hello World',
fontName=_FONTS[0],
fontSize=size))
D.add(String(150, 150,
'Hello World',
fontName=_FONTS[1],
fontSize=36))
return D
def getDrawing04():
"""Text strings in various colours.
Colours are blue, yellow and red from bottom left
to upper right.
"""
D = Drawing(400, 200)
i = 0
for color in (colors.blue, colors.yellow, colors.red):
D.add(String(50+i*30, 50+i*30,
'Hello World', fillColor=color))
i = i + 1
return D
def getDrawing05():
"""Text strings with various anchors (alignments).
Text alignment conforms to the anchors in the left column.
"""
D = Drawing(400, 200)
lineX = 250
D.add(Line(lineX,10, lineX,190, strokeColor=colors.gray))
y = 130
for anchor in ('start', 'middle', 'end'):
D.add(String(lineX, y, 'Hello World', textAnchor=anchor))
D.add(String(50, y, anchor + ':'))
y = y - 30
return D
def getDrawing06():
"""This demonstrates all the basic shapes at once.
There are no groups or references.
Each solid shape should have a green fill.
"""
green = colors.green
D = Drawing(400, 200) #, fillColor=green)
D.add(Line(10,10, 390,190))
D.add(Circle(100,100,20, fillColor=green))
D.add(Circle(200,100,40, fillColor=green))
D.add(Circle(300,100,30, fillColor=green))
D.add(Wedge(330,100,40, -10,40, fillColor=green))
D.add(PolyLine([120,10, 130,20, 140,10, 150,20, 160,10,
170,20, 180,10, 190,20, 200,10], fillColor=green))
D.add(Polygon([300,20, 350,20, 390,80, 300,75, 330,40], fillColor=green))
D.add(Ellipse(50,150, 40, 20, fillColor=green))
D.add(Rect(120,150, 60,30,
strokeWidth=10,
strokeColor=colors.yellow,
fillColor=green)) #square corners
D.add(Rect(220, 150, 60, 30, 10, 10, fillColor=green)) #round corners
D.add(String(10,50, 'Basic Shapes', fillColor=colors.black, fontName='Helvetica'))
return D
def getDrawing07():
"""This tests the ability to translate and rotate groups. The first set of axes should be
near the bottom left of the drawing. The second should be rotated counterclockwise
by 15 degrees. The third should be rotated by 30 degrees."""
D = Drawing(400, 200)
Axis = Group(
Line(0,0,100,0), #x axis
Line(0,0,0,50), # y axis
Line(0,10,10,10), #ticks on y axis
Line(0,20,10,20),
Line(0,30,10,30),
Line(0,40,10,40),
Line(10,0,10,10), #ticks on x axis
Line(20,0,20,10),
Line(30,0,30,10),
Line(40,0,40,10),
Line(50,0,50,10),
Line(60,0,60,10),
Line(70,0,70,10),
Line(80,0,80,10),
Line(90,0,90,10),
String(20, 35, 'Axes', fill=colors.black)
)
firstAxisGroup = Group(Axis)
firstAxisGroup.translate(10,10)
D.add(firstAxisGroup)
secondAxisGroup = Group(Axis)
secondAxisGroup.translate(150,10)
secondAxisGroup.rotate(15)
D.add(secondAxisGroup)
thirdAxisGroup = Group(Axis, transform=mmult(translate(300,10), rotate(30)))
D.add(thirdAxisGroup)
return D
def getDrawing08():
"""This tests the ability to scale coordinates. The bottom left set of axes should be
near the bottom left of the drawing. The bottom right should be stretched vertically
by a factor of 2. The top left one should be stretched horizontally by a factor of 2.
    The top right should have the vertical axis leaning over to the right by 30 degrees."""
D = Drawing(400, 200)
Axis = Group(
Line(0,0,100,0), #x axis
Line(0,0,0,50), # y axis
Line(0,10,10,10), #ticks on y axis
Line(0,20,10,20),
Line(0,30,10,30),
Line(0,40,10,40),
Line(10,0,10,10), #ticks on x axis
Line(20,0,20,10),
Line(30,0,30,10),
Line(40,0,40,10),
Line(50,0,50,10),
Line(60,0,60,10),
Line(70,0,70,10),
Line(80,0,80,10),
Line(90,0,90,10),
String(20, 35, 'Axes', fill=colors.black)
)
firstAxisGroup = Group(Axis)
firstAxisGroup.translate(10,10)
D.add(firstAxisGroup)
secondAxisGroup = Group(Axis)
secondAxisGroup.translate(150,10)
secondAxisGroup.scale(1,2)
D.add(secondAxisGroup)
thirdAxisGroup = Group(Axis)
thirdAxisGroup.translate(10,125)
thirdAxisGroup.scale(2,1)
D.add(thirdAxisGroup)
fourthAxisGroup = Group(Axis)
fourthAxisGroup.translate(250,125)
fourthAxisGroup.skew(30,0)
D.add(fourthAxisGroup)
return D
def getDrawing09():
"""This tests rotated strings
Some renderers will have a separate mechanism for font drawing. This test
just makes sure strings get transformed the same way as regular graphics."""
D = Drawing(400, 200)
fontName = _FONTS[0]
fontSize = 12
text = "I should be totally horizontal and enclosed in a box"
textWidth = stringWidth(text, fontName, fontSize)
g1 = Group(
String(20, 20, text, fontName=fontName, fontSize = fontSize),
Rect(18, 18, textWidth + 4, fontSize + 4, fillColor=None)
)
D.add(g1)
text = "I should slope up by 15 degrees, so my right end is higher than my left"
textWidth = stringWidth(text, fontName, fontSize)
g2 = Group(
String(20, 20, text, fontName=fontName, fontSize = fontSize),
Rect(18, 18, textWidth + 4, fontSize + 4, fillColor=None)
)
g2.translate(0, 50)
g2.rotate(15)
D.add(g2)
return D
def getDrawing10():
"""This tests nested groups with multiple levels of coordinate transformation.
Each box should be staggered up and to the right, moving by 25 points each time."""
D = Drawing(400, 200)
fontName = _FONTS[0]
fontSize = 12
g1 = Group(
Rect(0, 0, 100, 20, fillColor=colors.yellow),
String(5, 5, 'Text in the box', fontName=fontName, fontSize = fontSize)
)
D.add(g1)
g2 = Group(g1, transform = translate(25,25))
D.add(g2)
g3 = Group(g2, transform = translate(25,25))
D.add(g3)
g4 = Group(g3, transform = translate(25,25))
D.add(g4)
return D
from reportlab.graphics.widgets.signsandsymbols import SmileyFace
def getDrawing11():
'''test of anchoring'''
def makeSmiley(x, y, size, color):
"Make a smiley data item representation."
d = size
s = SmileyFace()
s.fillColor = color
s.x = x-d
s.y = y-d
s.size = d*2
return s
D = Drawing(400, 200) #, fillColor=colors.purple)
g = Group(transform=(1,0,0,1,0,0))
g.add(makeSmiley(100,100,10,colors.red))
g.add(Line(90,100,110,100,strokeColor=colors.green))
g.add(Line(100,90,100,110,strokeColor=colors.green))
D.add(g)
g = Group(transform=(2,0,0,2,100,-100))
g.add(makeSmiley(100,100,10,colors.blue))
g.add(Line(90,100,110,100,strokeColor=colors.green))
g.add(Line(100,90,100,110,strokeColor=colors.green))
D.add(g)
g = Group(transform=(2,0,0,2,0,0))
return D
def getDrawing12():
"""Text strings in a non-standard font.
All that is required is to place the .afm and .pfb files
    on the font path given in rl_config.py,
for example in reportlab/lib/fonts/.
"""
faceName = "DarkGardenMK"
D = Drawing(400, 200)
for size in range(12, 36, 4):
D.add(String(10+size*2,
10+size*2,
'Hello World',
fontName=faceName,
fontSize=size))
return D
def getDrawing13():
'Test Various TTF Fonts'
def drawit(F,w=400,h=200,fontSize=12,slack=2,gap=5):
D = Drawing(w,h)
th = 2*gap + fontSize*1.2
gh = gap + .2*fontSize
y = h
maxx = 0
for fontName in F:
y -= th
text = fontName+asNative(b': I should be totally horizontal and enclosed in a box and end in alphabetagamma \xc2\xa2\xc2\xa9\xc2\xae\xc2\xa3\xca\xa5\xd0\x96\xd6\x83\xd7\x90\xd9\x82\xe0\xa6\x95\xce\xb1\xce\xb2\xce\xb3')
textWidth = stringWidth(text, fontName, fontSize)
maxx = max(maxx,textWidth+20)
D.add(
Group(Rect(8, y-gh, textWidth + 4, th, strokeColor=colors.red, strokeWidth=.5, fillColor=colors.lightgrey),
String(10, y, text, fontName=fontName, fontSize = fontSize)))
y -= 5
return maxx, h-y+gap, D
maxx, maxy, D = drawit(_FONTS)
if maxx>400 or maxy>200: _,_,D = drawit(_FONTS,maxx,maxy)
return D
def smallArrow():
'''create a small PIL image'''
from reportlab.graphics.renderPM import _getImage
from reportlab.lib.utils import getBytesIO
b = base64.decodestring(b'''R0lGODdhCgAHAIMAAP/////29v/d3f+ysv9/f/9VVf9MTP8iIv8ICP8AAAAAAAAAAAAAAAAAAAAA
AAAAACwAAAAACgAHAAAIMwABCBxIsKABAQASFli4MAECAgEAJJhIceKBAQkyasx4YECBjx8TICAQ
AIDJkwYEAFgZEAA7''')
return _getImage().open(getBytesIO(b))
def getDrawing14():
'''test shapes.Image'''
from reportlab.graphics.shapes import Image
D = Drawing(400, 200)
im0 = smallArrow()
D.add(Image(x=0,y=0,width=None,height=None,path=im0))
im1 = smallArrow()
D.add(Image(x=400-20,y=200-14,width=20,height=14,path=im1))
return D
def getAllFunctionDrawingNames(doTTF=1):
"Get a list of drawing function names from somewhere."
funcNames = []
# Here we get the names from the global name space.
symbols = list(globals().keys())
symbols.sort()
for funcName in symbols:
if funcName[0:10] == 'getDrawing':
if doTTF or funcName!='getDrawing13':
funcNames.append(funcName)
return funcNames
def _evalFuncDrawing(name, D, l=None, g=None):
try:
d = eval(name + '()', g or globals(), l or locals())
except:
d = getFailedDrawing(name)
D.append((d, eval(name + '.__doc__'), name[3:]))
def getAllTestDrawings(doTTF=1):
D = []
for f in getAllFunctionDrawingNames(doTTF=doTTF):
_evalFuncDrawing(f,D)
return D
def writePDF(drawings):
"Create and save a PDF file containing some drawings."
pdfPath = os.path.splitext(sys.argv[0])[0] + '.pdf'
c = Canvas(pdfPath)
c.setFont(_FONTS[0], 32)
c.drawString(80, 750, 'ReportLab Graphics-Shapes Test')
# Print drawings in a loop, with their doc strings.
c.setFont(_FONTS[0], 12)
y = 740
i = 1
for (drawing, docstring, funcname) in drawings:
if y < 300: # Allows 5-6 lines of text.
c.showPage()
y = 740
# Draw a title.
y = y - 30
c.setFont(_FONTS[2],12)
c.drawString(80, y, '%s (#%d)' % (funcname, i))
c.setFont(_FONTS[0],12)
y = y - 14
textObj = c.beginText(80, y)
textObj.textLines(docstring)
c.drawText(textObj)
y = textObj.getY()
y = y - drawing.height
drawing.drawOn(c, 80, y)
i = i + 1
c.save()
print('wrote %s ' % pdfPath)
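# Editor's illustrative sketch (not part of the original script): a single
# drawing can also be rendered directly with reportlab's renderPDF helper,
# without building the whole multi-page report.
def _demo_single_drawing(path='drawing01.pdf'):
    from reportlab.graphics import renderPDF
    renderPDF.drawToFile(getDrawing01(), path, msg='single test drawing')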
class ShapesTestCase(unittest.TestCase):
"Test generating all kinds of shapes."
def setUp(self):
"Prepare some things before the tests start."
self.funcNames = getAllFunctionDrawingNames()
self.drawings = []
def tearDown(self):
"Do what has to be done after the tests are over."
writePDF(self.drawings)
# This should always succeed. If each drawing would be
# wrapped in a dedicated test method like this one, it
# would be possible to have a count for wrong tests
# as well... Something like this is left for later...
def testAllDrawings(self):
"Make a list of drawings."
for f in self.funcNames:
if f[0:10] == 'getDrawing':
# Make an instance and get its doc string.
# If that fails, use a default error drawing.
_evalFuncDrawing(f,self.drawings)
def makeSuite():
"Make a test suite for unit testing."
suite = unittest.TestSuite()
suite.addTest(ShapesTestCase('testAllDrawings'))
return suite
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
| apache-2.0 |
acrsilva/animated-zZz-machine | bundle_final_app/libs/pyqtgraph-develop/examples/customGraphicsItem.py | 28 | 2235 | """
Demonstrate creation of a custom graphic (a candlestick plot)
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph import QtCore, QtGui
## Create a subclass of GraphicsObject.
## The only required methods are paint() and boundingRect()
## (see QGraphicsItem documentation)
class CandlestickItem(pg.GraphicsObject):
def __init__(self, data):
pg.GraphicsObject.__init__(self)
self.data = data ## data must have fields: time, open, close, min, max
self.generatePicture()
def generatePicture(self):
## pre-computing a QPicture object allows paint() to run much more quickly,
## rather than re-drawing the shapes every time.
self.picture = QtGui.QPicture()
p = QtGui.QPainter(self.picture)
p.setPen(pg.mkPen('w'))
w = (self.data[1][0] - self.data[0][0]) / 3.
for (t, open, close, min, max) in self.data:
p.drawLine(QtCore.QPointF(t, min), QtCore.QPointF(t, max))
if open > close:
p.setBrush(pg.mkBrush('r'))
else:
p.setBrush(pg.mkBrush('g'))
p.drawRect(QtCore.QRectF(t-w, open, w*2, close-open))
p.end()
def paint(self, p, *args):
p.drawPicture(0, 0, self.picture)
def boundingRect(self):
## boundingRect _must_ indicate the entire area that will be drawn on
## or else we will get artifacts and possibly crashing.
        ## (in this case, QPicture does all the work of computing the bounding rect for us)
return QtCore.QRectF(self.picture.boundingRect())
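    # Editor's illustrative addition (not in the original example): if the
    # data changes after construction, rebuild the picture and notify the
    # view that the geometry may have changed. The name setData is an
    # assumption, mirroring other pyqtgraph items.
    def setData(self, data):
        self.data = data
        self.prepareGeometryChange()      # bounding rect is about to change
        self.generatePicture()
        self.informViewBoundsChanged()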
data = [ ## fields are (time, open, close, min, max).
(1., 10, 13, 5, 15),
(2., 13, 17, 9, 20),
(3., 17, 14, 11, 23),
(4., 14, 15, 5, 19),
(5., 15, 9, 8, 22),
(6., 9, 15, 8, 16),
]
item = CandlestickItem(data)
plt = pg.plot()
plt.addItem(item)
plt.setWindowTitle('pyqtgraph example: customGraphicsItem')
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| lgpl-3.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/numpy/core/tests/test_unicode.py | 69 | 12594 | from __future__ import division, absolute_import, print_function
import sys
from numpy.testing import *
from numpy.core import *
from numpy.compat import asbytes, sixu
# Guess the UCS length for this python interpreter
if sys.version_info[:2] >= (3, 3):
# Python 3.3 uses a flexible string representation
ucs4 = False
def buffer_length(arr):
if isinstance(arr, unicode):
arr = str(arr)
return (sys.getsizeof(arr+"a") - sys.getsizeof(arr)) * len(arr)
v = memoryview(arr)
if v.shape is None:
return len(v) * v.itemsize
else:
return prod(v.shape) * v.itemsize
elif sys.version_info[0] >= 3:
import array as _array
ucs4 = (_array.array('u').itemsize == 4)
def buffer_length(arr):
if isinstance(arr, unicode):
return _array.array('u').itemsize * len(arr)
v = memoryview(arr)
if v.shape is None:
return len(v) * v.itemsize
else:
return prod(v.shape) * v.itemsize
else:
if len(buffer(sixu('u'))) == 4:
ucs4 = True
else:
ucs4 = False
def buffer_length(arr):
if isinstance(arr, ndarray):
return len(arr.data)
return len(buffer(arr))
# In both cases below we need to make sure that the byte swapped value (as
# UCS4) is still a valid unicode:
# Value that can be represented in UCS2 interpreters
ucs2_value = sixu('\u0900')
# Value that cannot be represented in UCS2 interpreters (but can in UCS4)
ucs4_value = sixu('\U00100900')
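# Editor's illustrative note (not part of the test suite): on a narrow
# (UCS2) interpreter ucs4_value is stored as a surrogate *pair*, so the
# same one-character string costs twice the buffer space -- exactly what
# the buffer-length assertions below encode.
def _demo_surrogate_pair_cost():
    # (2, 4) on narrow builds; (4, 4) on wide/UCS4 builds
    return buffer_length(ucs2_value), buffer_length(ucs4_value)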
############################################################
# Creation tests
############################################################
class create_zeros(object):
"""Check the creation of zero-valued arrays"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
self.assertTrue(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
self.assertTrue(ua_scalar == sixu(''))
# Encode to ascii and double check
self.assertTrue(ua_scalar.encode('ascii') == asbytes(''))
# Check buffer lengths for scalars
if ucs4:
self.assertTrue(buffer_length(ua_scalar) == 0)
else:
self.assertTrue(buffer_length(ua_scalar) == 0)
def test_zeros0D(self):
"""Check creation of 0-dimensional objects"""
ua = zeros((), dtype='U%s' % self.ulen)
self.content_check(ua, ua[()], 4*self.ulen)
def test_zerosSD(self):
"""Check creation of single-dimensional objects"""
ua = zeros((2,), dtype='U%s' % self.ulen)
self.content_check(ua, ua[0], 4*self.ulen*2)
self.content_check(ua, ua[1], 4*self.ulen*2)
def test_zerosMD(self):
"""Check creation of multi-dimensional objects"""
ua = zeros((2, 3, 4), dtype='U%s' % self.ulen)
self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
class test_create_zeros_1(create_zeros, TestCase):
"""Check the creation of zero-valued arrays (size 1)"""
ulen = 1
class test_create_zeros_2(create_zeros, TestCase):
"""Check the creation of zero-valued arrays (size 2)"""
ulen = 2
class test_create_zeros_1009(create_zeros, TestCase):
"""Check the creation of zero-valued arrays (size 1009)"""
ulen = 1009
class create_values(object):
"""Check the creation of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
self.assertTrue(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
self.assertTrue(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
self.assertTrue(ua_scalar.encode('utf-8') == \
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if ucs4:
self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen)
else:
if self.ucs_value == ucs4_value:
# In UCS2, the \U0010FFFF will be represented using a
# surrogate *pair*
self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
# In UCS2, the \uFFFF will be represented using a
# regular 2-byte word
self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
"""Check creation of 0-dimensional objects with values"""
ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
self.content_check(ua, ua[()], 4*self.ulen)
def test_valuesSD(self):
"""Check creation of single-dimensional objects with values"""
ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
self.content_check(ua, ua[0], 4*self.ulen*2)
self.content_check(ua, ua[1], 4*self.ulen*2)
def test_valuesMD(self):
"""Check creation of multi-dimensional objects with values"""
ua = array([[[self.ucs_value*self.ulen]*2]*3]*4, dtype='U%s' % self.ulen)
self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
class test_create_values_1_ucs2(create_values, TestCase):
"""Check the creation of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
class test_create_values_1_ucs4(create_values, TestCase):
"""Check the creation of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
class test_create_values_2_ucs2(create_values, TestCase):
"""Check the creation of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
class test_create_values_2_ucs4(create_values, TestCase):
"""Check the creation of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
class test_create_values_1009_ucs2(create_values, TestCase):
"""Check the creation of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
class test_create_values_1009_ucs4(create_values, TestCase):
"""Check the creation of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
############################################################
# Assignment tests
############################################################
class assign_values(object):
"""Check the assignment of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
self.assertTrue(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
self.assertTrue(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
self.assertTrue(ua_scalar.encode('utf-8') == \
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if ucs4:
self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen)
else:
if self.ucs_value == ucs4_value:
# In UCS2, the \U0010FFFF will be represented using a
# surrogate *pair*
self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
# In UCS2, the \uFFFF will be represented using a
# regular 2-byte word
self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
"""Check assignment of 0-dimensional objects with values"""
ua = zeros((), dtype='U%s' % self.ulen)
ua[()] = self.ucs_value*self.ulen
self.content_check(ua, ua[()], 4*self.ulen)
def test_valuesSD(self):
"""Check assignment of single-dimensional objects with values"""
ua = zeros((2,), dtype='U%s' % self.ulen)
ua[0] = self.ucs_value*self.ulen
self.content_check(ua, ua[0], 4*self.ulen*2)
ua[1] = self.ucs_value*self.ulen
self.content_check(ua, ua[1], 4*self.ulen*2)
def test_valuesMD(self):
"""Check assignment of multi-dimensional objects with values"""
ua = zeros((2, 3, 4), dtype='U%s' % self.ulen)
ua[0, 0, 0] = self.ucs_value*self.ulen
self.content_check(ua, ua[0, 0, 0], 4*self.ulen*2*3*4)
ua[-1, -1, -1] = self.ucs_value*self.ulen
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
class test_assign_values_1_ucs2(assign_values, TestCase):
"""Check the assignment of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
class test_assign_values_1_ucs4(assign_values, TestCase):
"""Check the assignment of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
class test_assign_values_2_ucs2(assign_values, TestCase):
"""Check the assignment of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
class test_assign_values_2_ucs4(assign_values, TestCase):
"""Check the assignment of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
class test_assign_values_1009_ucs2(assign_values, TestCase):
"""Check the assignment of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
class test_assign_values_1009_ucs4(assign_values, TestCase):
"""Check the assignment of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
############################################################
# Byteorder tests
############################################################
class byteorder_values:
"""Check the byteorder of unicode arrays in round-trip conversions"""
def test_values0D(self):
"""Check byteorder of 0-dimensional objects"""
ua = array(self.ucs_value*self.ulen, dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
# This changes the interpretation of the data region (but not the
# actual data), therefore the returned scalars are not
# the same (they are byte-swapped versions of each other).
self.assertTrue(ua[()] != ua2[()])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_valuesSD(self):
"""Check byteorder of single-dimensional objects"""
ua = array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
self.assertTrue(ua[0] != ua2[0])
self.assertTrue(ua[-1] != ua2[-1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
def test_valuesMD(self):
"""Check byteorder of multi-dimensional objects"""
ua = array([[[self.ucs_value*self.ulen]*2]*3]*4,
dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
self.assertTrue(ua[0, 0, 0] != ua2[0, 0, 0])
self.assertTrue(ua[-1, -1, -1] != ua2[-1, -1, -1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
class test_byteorder_1_ucs2(byteorder_values, TestCase):
"""Check the byteorder in unicode (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
class test_byteorder_1_ucs4(byteorder_values, TestCase):
"""Check the byteorder in unicode (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
class test_byteorder_2_ucs2(byteorder_values, TestCase):
"""Check the byteorder in unicode (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
class test_byteorder_2_ucs4(byteorder_values, TestCase):
"""Check the byteorder in unicode (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
class test_byteorder_1009_ucs2(byteorder_values, TestCase):
"""Check the byteorder in unicode (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
class test_byteorder_1009_ucs4(byteorder_values, TestCase):
"""Check the byteorder in unicode (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
if __name__ == "__main__":
run_module_suite()
| mit |
mice-software/maus | tests/integration/test_simulation/test_beam_maker/binomial_beam_config.py | 1 | 4151 | # This file is part of MAUS: http://micewww.pp.rl.ac.uk:8080/projects/maus
#
# MAUS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MAUS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MAUS. If not, see <http://www.gnu.org/licenses/>.
"""
Configuration to generate a beam distribution with binomial distribution in
the spill and various distributions for difference particle types
"""
#pylint: disable = C0103, R0801
import os
mrd = os.environ["MAUS_ROOT_DIR"]
simulation_geometry_filename = os.path.join(
mrd, "tests", "integration", "test_simulation", "test_beam_maker",
"BeamTest.dat"
)
output_root_file_name = os.path.join(mrd, "tmp", "test_beammaker_output.root")
input_root_file_name = output_root_file_name # for conversion
spill_generator_number_of_spills = 1000
verbose_level = 1
beam = {
"particle_generator":"binomial", # routine for generating empty primaries
"binomial_n":20, # number of coin tosses
"binomial_p":0.1, # probability of making a particle on each toss
"random_seed":5, # random seed for beam generation; controls also how the MC
# seeds are generated
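    # With binomial_n=20 and binomial_p=0.1 the expected number of primaries
    # per spill is n*p = 2 (variance n*p*(1-p) = 1.8), so most spills carry
    # only a few particles.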
"definitions":[
##### MUONS #######
{
"reference":{
"position":{"x":0.0, "y":0.0, "z":3.0},
"momentum":{"x":0.0, "y":0.0, "z":1.0},
"spin":{"x":0.0, "y":0.0, "z":1.0},
"particle_id":-13,
"energy":226.0,
"time":2.e6,
"random_seed":0
}, # reference particle
"random_seed_algorithm":"incrementing_random", # algorithm for seeding MC
"weight":90., # probability of generating a particle
"transverse":{
"transverse_mode":"penn",
"emittance_4d":6.,
"beta_4d":333.,
"alpha_4d":1.,
"normalised_angular_momentum":2.,
"bz":4.e-3
},
"longitudinal":{
"longitudinal_mode":"sawtooth_time",
"momentum_variable":"p",
"sigma_p":25.,
"t_start":-1.e6,
"t_end":+1.e6},
"coupling":{"coupling_mode":"none"}
},
##### PIONS #####
{ # as above...
"reference":{
"position":{"x":0.0, "y":-0.0, "z":0.0},
"momentum":{"x":0.0, "y":0.0, "z":1.0},
"spin":{"x":0.0, "y":0.0, "z":1.0},
"particle_id":211, "energy":285.0, "time":0.0, "random_seed":10
},
"random_seed_algorithm":"incrementing_random",
"weight":2.,
"transverse":{"transverse_mode":"constant_solenoid", "emittance_4d":6.,
"normalised_angular_momentum":0.1, "bz":4.e-3},
"longitudinal":{"longitudinal_mode":"uniform_time",
"momentum_variable":"p",
"sigma_p":25.,
"t_start":-1.e6,
"t_end":+1.e6},
"coupling":{"coupling_mode":"none"}
},
##### ELECTRONS #####
{ # as above...
"reference":{
"position":{"x":0.0, "y":-0.0, "z":0.0},
"momentum":{"x":0.0, "y":0.0, "z":1.0},
"spin":{"x":0.0, "y":0.0, "z":1.0},
"particle_id":-11, "energy":200.0, "time":0.0, "random_seed":10
},
"random_seed_algorithm":"incrementing_random",
"weight":8.,
"transverse":{"transverse_mode":"constant_solenoid", "emittance_4d":6.,
"normalised_angular_momentum":0.1, "bz":4.e-3},
"longitudinal":{"longitudinal_mode":"uniform_time",
"momentum_variable":"p",
"sigma_p":25.,
"t_start":-2.e6,
"t_end":+1.e6},
"coupling":{"coupling_mode":"none"}
}]
}
| gpl-3.0 |
berkmancenter/mediacloud | apps/common/tests/python/mediawords/util/test_extract_article_html_from_page_html.py | 1 | 3359 | import multiprocessing
from typing import Union
from unittest import TestCase
from mediawords.test.hash_server import HashServer
from mediawords.util.config.common import CommonConfig
from mediawords.util.extract_article_from_page import extract_article_html_from_page_html
from mediawords.util.network import random_unused_port
from mediawords.util.parse_json import encode_json
def test_extract_article_html_from_page_html():
"""Basic test."""
content = """
<html>
<head>
<title>I'm a test</title>
</head>
<body>
<p>Hi test, I'm dad!</p>
</body>
</html>
"""
response = extract_article_html_from_page_html(content=content)
assert response
assert 'extracted_html' in response
assert 'extractor_version' in response
assert "I'm a test" in response['extracted_html']
assert "Hi test, I'm dad!" in response['extracted_html']
assert 'readabilityBody' in response['extracted_html'] # <body id="readabilityBody">
assert "readability-lxml" in response['extractor_version']
class TestExtractConnectionErrors(TestCase):
"""Extract the page but fail the first response."""
__slots__ = [
'is_first_response',
]
expected_extracted_text = "Extraction worked the second time!"
def __extract_but_initially_fail(self, _: HashServer.Request) -> Union[str, bytes]:
"""Page callback that fails initially but then changes its mind."""
with self.is_first_response.get_lock():
if self.is_first_response.value == 1:
self.is_first_response.value = 0
# Closest to a connection error that we can get
raise Exception("Whoops!")
else:
response = ""
response += "HTTP/1.0 200 OK\r\n"
response += "Content-Type: application/json; charset=UTF-8\r\n"
response += "\r\n"
response += encode_json({
'extracted_html': self.expected_extracted_text,
'extractor_version': 'readability-lxml',
})
return response
def test_extract_article_html_from_page_html_connection_errors(self):
"""Try extracting with connection errors."""
# Use multiprocessing.Value() because request might be handled in a fork
self.is_first_response = multiprocessing.Value('i', 1)
pages = {
'/extract': {
'callback': self.__extract_but_initially_fail,
}
}
port = random_unused_port()
hs = HashServer(port=port, pages=pages)
hs.start()
class MockExtractorCommonConfig(CommonConfig):
"""Mock configuration which points to our unstable extractor."""
def extractor_api_url(self) -> str:
return f'http://localhost:{port}/extract'
extractor_response = extract_article_html_from_page_html(content='whatever', config=MockExtractorCommonConfig())
hs.stop()
assert extractor_response
assert 'extracted_html' in extractor_response
assert 'extractor_version' in extractor_response
assert extractor_response['extracted_html'] == self.expected_extracted_text
assert not self.is_first_response.value, "Make sure the initial extractor call failed."
| agpl-3.0 |
sndnvaps/linux-1 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have good way to tell the event type, but by
# the size of raw buffer, raw PEBS event with load latency data's
# size is 176 bytes, while the pure PEBS event's size is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
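# Example dispatch (hypothetical arguments): the factory above picks the event
# class from the raw buffer size alone:
#
#     ev = create_event("cycles", "bash", "libc.so", "main", raw_buf)
#     ev.show()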
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
| gpl-2.0 |
charmander/livestreamer | examples/gst-player.py | 22 | 3897 | #!/usr/bin/env python
from __future__ import print_function
import sys
import gi
from gi.repository import GObject as gobject, Gst as gst
from livestreamer import Livestreamer, StreamError, PluginError, NoPluginError
def exit(msg):
print(msg, file=sys.stderr)
sys.exit()
class LivestreamerPlayer(object):
def __init__(self):
self.fd = None
self.mainloop = gobject.MainLoop()
# This creates a playbin pipeline and using the appsrc source
# we can feed it our stream data
self.pipeline = gst.ElementFactory.make("playbin", None)
self.pipeline.set_property("uri", "appsrc://")
# When the playbin creates the appsrc source it will call
# this callback and allow us to configure it
self.pipeline.connect("source-setup", self.on_source_setup)
# Creates a bus and set callbacks to receive errors
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.connect("message::eos", self.on_eos)
self.bus.connect("message::error", self.on_error)
def exit(self, msg):
self.stop()
exit(msg)
def stop(self):
# Stop playback and exit mainloop
self.pipeline.set_state(gst.State.NULL)
self.mainloop.quit()
# Close the stream
if self.fd:
self.fd.close()
def play(self, stream):
# Attempt to open the stream
try:
self.fd = stream.open()
except StreamError as err:
self.exit("Failed to open stream: {0}".format(err))
# Start playback
self.pipeline.set_state(gst.State.PLAYING)
self.mainloop.run()
def on_source_setup(self, element, source):
# When this callback is called the appsrc expects
# us to feed it more data
source.connect("need-data", self.on_source_need_data)
def on_source_need_data(self, source, length):
# Attempt to read data from the stream
try:
data = self.fd.read(length)
except IOError as err:
self.exit("Failed to read data from stream: {0}".format(err))
# If data is empty it's the end of stream
if not data:
source.emit("end-of-stream")
return
# Convert the Python bytes into a GStreamer Buffer
# and then push it to the appsrc
buf = gst.Buffer.new_wrapped(data)
source.emit("push-buffer", buf)
def on_eos(self, bus, msg):
# Stop playback on end of stream
self.stop()
def on_error(self, bus, msg):
# Print error message and exit on error
error = msg.parse_error()[1]
self.exit(error)
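# Example invocation (hypothetical channel; assumes GStreamer 1.0 and
# livestreamer are installed):
#
#     $ python gst-player.py twitch.tv/somechannel best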
def main():
if len(sys.argv) < 3:
exit("Usage: {0} <url> <quality>".format(sys.argv[0]))
# Initialize and check GStreamer version
gi.require_version("Gst", "1.0")
gobject.threads_init()
gst.init(None)
# Collect arguments
url = sys.argv[1]
quality = sys.argv[2]
# Create the Livestreamer session
livestreamer = Livestreamer()
# Enable logging
livestreamer.set_loglevel("info")
livestreamer.set_logoutput(sys.stdout)
# Attempt to fetch streams
try:
streams = livestreamer.streams(url)
except NoPluginError:
exit("Livestreamer is unable to handle the URL '{0}'".format(url))
except PluginError as err:
exit("Plugin error: {0}".format(err))
if not streams:
exit("No streams found on URL '{0}'".format(url))
# Look for specified stream
if quality not in streams:
exit("Unable to find '{0}' stream on URL '{1}'".format(quality, url))
# We found the stream
stream = streams[quality]
# Create the player and start playback
player = LivestreamerPlayer()
# Blocks until playback is done
player.play(stream)
if __name__ == "__main__":
main()
| bsd-2-clause |
amagnus/pulsegig | app/models.py | 1 | 1894 | from django.db import models
from django.contrib.auth.models import User
class Guy(models.Model):
user = models.OneToOneField(User, primary_key=True)
cell = models.CharField(max_length=15)
metroarea_name = models.CharField(max_length=30, default=None, null=True)
metroareaID = models.IntegerField(default=None, null=True)
created = models.DateTimeField(auto_now_add=True)
    def __unicode__(self):
        return self.user.username  # return a display string, not the related User object
class Band(models.Model):
name = models.CharField(max_length=100)
genre = models.CharField(max_length=100)
skID = models.IntegerField()
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.name
class SimilarBand(models.Model):
band_input = models.ForeignKey(Band, related_name='band_input')
band_suggest = models.ForeignKey(Band, related_name='band_suggest')
disabled = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.band_input.name
class Alert(models.Model):
user = models.ForeignKey(User)
band = models.ForeignKey(Band)
disabled = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.band.name
class AlertLog(models.Model):
user = models.ForeignKey(User)
band = models.ForeignKey(Band)
eventskID = models.IntegerField(default=None)
showDate = models.DateField()
showURL = models.CharField(max_length=255)
is_similar = models.BooleanField(default=False)
send_on = models.DateTimeField()
has_sent = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
def __unicode__(self):
return self.band.name
| mit |
liangz0707/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features // 2:]] = 0 # sparsify coef (integer division so indexing works on Python 3)
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)  # noise of shape (n_samples,), not a single draw at loc=(n_samples,)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
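# Note: the per-line timings quoted in the module docstring come from running
# kernprof with @profile decorators on the two benchmark functions above; the
# decorators are left off here so the script also runs without kernprof.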
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
trel/irods-qgis | test/qgis_interface.py | 112 | 6395 | # coding=utf-8
"""QGIS plugin implementation.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
.. note:: This source code was copied from the 'postgis viewer' application
with original authors:
Copyright (c) 2010 by Ivan Mincik, [email protected]
Copyright (c) 2011 German Carrillo, [email protected]
Copyright (c) 2014 Tim Sutton, [email protected]
"""
__author__ = '[email protected]'
__revision__ = '$Format:%H$'
__date__ = '10/01/2011'
__copyright__ = (
'Copyright (c) 2010 by Ivan Mincik, [email protected] and '
'Copyright (c) 2011 German Carrillo, [email protected]'
'Copyright (c) 2014 Tim Sutton, [email protected]'
)
import logging
from PyQt4.QtCore import QObject, pyqtSlot, pyqtSignal
from qgis.core import QgsMapLayerRegistry
from qgis.gui import QgsMapCanvasLayer
LOGGER = logging.getLogger('QGIS')
#noinspection PyMethodMayBeStatic,PyPep8Naming
class QgisInterface(QObject):
"""Class to expose QGIS objects and functions to plugins.
This class is here for enabling us to run unit tests only,
so most methods are simply stubs.
"""
currentLayerChanged = pyqtSignal(QgsMapCanvasLayer)
def __init__(self, canvas):
"""Constructor
:param canvas:
"""
QObject.__init__(self)
self.canvas = canvas
# Set up slots so we can mimic the behaviour of QGIS when layers
# are added.
LOGGER.debug('Initialising canvas...')
# noinspection PyArgumentList
QgsMapLayerRegistry.instance().layersAdded.connect(self.addLayers)
# noinspection PyArgumentList
QgsMapLayerRegistry.instance().layerWasAdded.connect(self.addLayer)
# noinspection PyArgumentList
QgsMapLayerRegistry.instance().removeAll.connect(self.removeAllLayers)
# For processing module
self.destCrs = None
@pyqtSlot('QStringList')
def addLayers(self, layers):
"""Handle layers being added to the registry so they show up in canvas.
:param layers: list<QgsMapLayer> list of map layers that were added
.. note:: The QgsInterface api does not include this method,
it is added here as a helper to facilitate testing.
"""
#LOGGER.debug('addLayers called on qgis_interface')
#LOGGER.debug('Number of layers being added: %s' % len(layers))
#LOGGER.debug('Layer Count Before: %s' % len(self.canvas.layers()))
current_layers = self.canvas.layers()
final_layers = []
for layer in current_layers:
final_layers.append(QgsMapCanvasLayer(layer))
for layer in layers:
final_layers.append(QgsMapCanvasLayer(layer))
self.canvas.setLayerSet(final_layers)
#LOGGER.debug('Layer Count After: %s' % len(self.canvas.layers()))
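    # Sketch of the intended test-time behaviour: registering a layer fires
    # this slot, so the canvas layer set mirrors what QGIS itself would show:
    #
    #     QgsMapLayerRegistry.instance().addMapLayer(layer)
    #     assert layer in iface.canvas.layers()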
@pyqtSlot('QgsMapLayer')
def addLayer(self, layer):
"""Handle a layer being added to the registry so it shows up in canvas.
:param layer: list<QgsMapLayer> list of map layers that were added
.. note: The QgsInterface api does not include this method, it is added
here as a helper to facilitate testing.
.. note: The addLayer method was deprecated in QGIS 1.8 so you should
not need this method much.
"""
pass
@pyqtSlot()
def removeAllLayers(self):
"""Remove layers from the canvas before they get deleted."""
self.canvas.setLayerSet([])
def newProject(self):
"""Create new project."""
# noinspection PyArgumentList
QgsMapLayerRegistry.instance().removeAllMapLayers()
# ---------------- API Mock for QgsInterface follows -------------------
def zoomFull(self):
"""Zoom to the map full extent."""
pass
def zoomToPrevious(self):
"""Zoom to previous view extent."""
pass
def zoomToNext(self):
"""Zoom to next view extent."""
pass
def zoomToActiveLayer(self):
"""Zoom to extent of active layer."""
pass
def addVectorLayer(self, path, base_name, provider_key):
"""Add a vector layer.
:param path: Path to layer.
:type path: str
:param base_name: Base name for layer.
:type base_name: str
:param provider_key: Provider key e.g. 'ogr'
:type provider_key: str
"""
pass
def addRasterLayer(self, path, base_name):
"""Add a raster layer given a raster layer file name
:param path: Path to layer.
:type path: str
:param base_name: Base name for layer.
:type base_name: str
"""
pass
def activeLayer(self):
"""Get pointer to the active layer (layer selected in the legend)."""
# noinspection PyArgumentList
layers = QgsMapLayerRegistry.instance().mapLayers()
for item in layers:
return layers[item]
def addToolBarIcon(self, action):
"""Add an icon to the plugins toolbar.
:param action: Action to add to the toolbar.
:type action: QAction
"""
pass
def removeToolBarIcon(self, action):
"""Remove an action (icon) from the plugin toolbar.
:param action: Action to add to the toolbar.
:type action: QAction
"""
pass
def addToolBar(self, name):
"""Add toolbar with specified name.
:param name: Name for the toolbar.
:type name: str
"""
pass
def mapCanvas(self):
"""Return a pointer to the map canvas."""
return self.canvas
def mainWindow(self):
"""Return a pointer to the main window.
In case of QGIS it returns an instance of QgisApp.
"""
pass
def addDockWidget(self, area, dock_widget):
"""Add a dock widget to the main window.
:param area: Where in the ui the dock should be placed.
:type area:
:param dock_widget: A dock widget to add to the UI.
:type dock_widget: QDockWidget
"""
pass
def legendInterface(self):
"""Get the legend."""
return self.canvas
| gpl-2.0 |
JackKelly/neuralnilm_prototype | scripts/e307.py | 2 | 6092 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import scaled_cost, mdn_nll, scaled_cost_ignore_inactive, ignore_inactive
from neuralnilm.plot import MDNPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
SEQ_LENGTH = 512
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=SEQ_LENGTH,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.0,
n_seq_per_batch=16,
subsample_target=4,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
independently_center_inputs = True,
standardise_input=True,
standardise_targets=True,
input_padding=0,
lag=0,
reshape_target_to_2D=False,
input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
'std': np.array([ 0.12636775], dtype=np.float32)},
target_stats={
'mean': np.array([ 0.04066789, 0.01881946,
0.24639061, 0.17608672, 0.10273963],
dtype=np.float32),
'std': np.array([ 0.11449792, 0.07338708,
0.26608968, 0.33463112, 0.21250485],
dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
updates_func=momentum,
learning_rate=1e-02,
learning_rate_changes_by_iteration={
500: 5e-03
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True
)
def callback(net, epoch):
net.source.reshape_target_to_2D = True
net.plotter = MDNPlotter(net)
net.generate_validation_data_and_set_shapes()
net.loss_function = lambda x, t: mdn_nll(x, t).mean()
net.learning_rate.set_value(1e-05)
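# The callback above is installed via 'layer_changes' in exp_a(): at iteration
# 1001 the final DenseLayer is replaced by a MixtureDensityLayer, targets are
# reshaped to 2D, and training continues with the MDN negative log-likelihood
# loss at a lower learning rate.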
def exp_a(name):
# 3 appliances
global source
source_dict_copy = deepcopy(source_dict)
source_dict_copy['reshape_target_to_2D'] = False
source = RealApplianceSource(**source_dict_copy)
source.reshape_target_to_2D = False
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 50
net_dict_copy['layers_config'] = [
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1.),
'nonlinearity': tanh
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 1, # pool over the time axis
'pool_function': T.max
},
{
'type': BidirectionalRecurrentLayer,
'num_units': N,
'gradient_steps': GRADIENT_STEPS,
'W_in_to_hid': Normal(std=1/sqrt(N)),
'nonlinearity': tanh
},
{
'type': DenseLayer,
'W': Normal(std=1/sqrt(N)),
'num_units': source.n_outputs,
'nonlinearity': None
}
]
net_dict_copy['layer_changes'] = {
1001: {
'remove_from': -2,
'callback': callback,
'new_layers': [
{
'type': MixtureDensityLayer,
'num_units': source.n_outputs,
'num_components': 2
}
]
}
}
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
mcollins12321/anita | venv/lib/python2.7/site-packages/setuptools/command/setopt.py | 458 | 5080 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import distutils
import os
from setuptools import Command
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
"""Get the filename of the distutils, local, global, or per-user config
`kind` must be one of "local", "global", or "user"
"""
if kind == 'local':
return 'setup.cfg'
if kind == 'global':
return os.path.join(
os.path.dirname(distutils.__file__), 'distutils.cfg'
)
if kind == 'user':
dot = os.name == 'posix' and '.' or ''
return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
raise ValueError(
"config_file() type must be 'local', 'global', or 'user'", kind
)
def edit_config(filename, settings, dry_run=False):
"""Edit a configuration file to include `settings`
`settings` is a dictionary of dictionaries or ``None`` values, keyed by
command/section name. A ``None`` value means to delete the entire section,
while a dictionary lists settings to be changed or deleted in that section.
A setting of ``None`` means to delete that setting.
"""
from setuptools.compat import ConfigParser
log.debug("Reading configuration from %s", filename)
opts = ConfigParser.RawConfigParser()
opts.read([filename])
for section, options in settings.items():
if options is None:
log.info("Deleting section [%s] from %s", section, filename)
opts.remove_section(section)
else:
if not opts.has_section(section):
log.debug("Adding new section [%s] to %s", section, filename)
opts.add_section(section)
for option, value in options.items():
if value is None:
log.debug(
"Deleting %s.%s from %s",
section, option, filename
)
opts.remove_option(section, option)
if not opts.options(section):
log.info("Deleting empty [%s] section from %s",
section, filename)
opts.remove_section(section)
else:
log.debug(
"Setting %s.%s to %r in %s",
section, option, value, filename
)
opts.set(section, option, value)
log.info("Writing %s", filename)
if not dry_run:
with open(filename, 'w') as f:
opts.write(f)
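# Usage sketch for edit_config() (hypothetical section/option names): set one
# option and delete another in setup.cfg; a None value removes the setting:
#
#     edit_config('setup.cfg', {
#         'bdist_rpm': {'owner': 'root', 'formats': None},
#     })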
class option_base(Command):
"""Abstract base class for commands that mess with config files"""
user_options = [
('global-config', 'g',
"save options to the site-wide distutils.cfg file"),
('user-config', 'u',
"save options to the current user's pydistutils.cfg file"),
('filename=', 'f',
"configuration file to use (default=setup.cfg)"),
]
boolean_options = [
'global-config', 'user-config',
]
def initialize_options(self):
self.global_config = None
self.user_config = None
self.filename = None
def finalize_options(self):
filenames = []
if self.global_config:
filenames.append(config_file('global'))
if self.user_config:
filenames.append(config_file('user'))
if self.filename is not None:
filenames.append(self.filename)
if not filenames:
filenames.append(config_file('local'))
if len(filenames) > 1:
raise DistutilsOptionError(
"Must specify only one configuration file option",
filenames
)
self.filename, = filenames
class setopt(option_base):
"""Save command-line options to a file"""
description = "set an option in setup.cfg or another config file"
user_options = [
('command=', 'c', 'command to set an option for'),
('option=', 'o', 'option to set'),
('set-value=', 's', 'value of the option'),
('remove', 'r', 'remove (unset) the value'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.command = None
self.option = None
self.set_value = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.command is None or self.option is None:
raise DistutilsOptionError("Must specify --command *and* --option")
if self.set_value is None and not self.remove:
raise DistutilsOptionError("Must specify --set-value or --remove")
def run(self):
edit_config(
self.filename, {
self.command: {self.option.replace('-', '_'): self.set_value}
},
self.dry_run
)
| mit |
karstenw/FMPLayoutExporter | fmpa10.py | 1 | 5014 | version = 1.1
path = u'/Applications/FileMaker/FileMaker Pro 11 Advanced/FileMaker Pro Advanced.app'
classes = \
[('application', 'capp'),
('window', 'cwin'),
('document', 'docu'),
('database', 'cDB '),
('table', 'cTBL'),
('FileMaker_script', 'cSCP'),
('layout', 'ctbl'),
('field', 'ccol'),
('record', 'crow'),
('cell', 'ccel'),
('repetition', 'cREP'),
('request', 'cRQT'),
('menu_item', 'cmen'),
('menu', 'cmnu')]
enums = \
[('table', 'TABL'),
('view', 'VIEW'),
('read_only', 'nmod'),
('formulas_protected', 'fpro'),
('read_write', 'modf'),
('no_access', '\x00\x00\x00\x00'),
('read', '\x00\x00\x00\x01'),
('write', '\x00\x00\x00\x02'),
('update', '\x00\x00\x00\x04'),
('create', '\x00\x00\x00\x08'),
('delete', '\x00\x00\x00\x10'),
('read_write', '\x00\x00\x00\x03'),
('read_update', '\x00\x00\x00\x05'),
('read_create', '\x00\x00\x00\t'),
('read_delete', '\x00\x00\x00\x11'),
('write_update', '\x00\x00\x00\x06'),
('write_create', '\x00\x00\x00\n'),
('write_delete', '\x00\x00\x00\x12'),
('update_create', '\x00\x00\x00\x0c'),
('update_delete', '\x00\x00\x00\x14'),
 ('create_delete', '\x00\x00\x00\x18'),
('read_write_update', '\x00\x00\x00\x07'),
('read_write_create', '\x00\x00\x00\x0b'),
('read_write_delete', '\x00\x00\x00\x13'),
('write_update_create', '\x00\x00\x00\x0e'),
('write_update_delete', '\x00\x00\x00\x16'),
('update_create_delete', '\x00\x00\x00\x1c'),
('read_create_delete', '\x00\x00\x00\x19'),
('read_update_delete', '\x00\x00\x00\x15'),
('write_create_delete', '\x00\x00\x00\x1a'),
('read_update_create', '\x00\x00\x00\r'),
('no_delete', '\x00\x00\x00\x0f'),
('no_create', '\x00\x00\x00\x17'),
('no_update', '\x00\x00\x00\x1b'),
('no_read', '\x00\x00\x00\x1e'),
('no_write', '\x00\x00\x00\x1d'),
('full', '\x00\x00\x00\x1f'),
('ascending', '\x00\x00\x00\x00'),
('descending', '\x00\x00\x00\x01'),
('custom', '\x00\x00\x00\x04'),
('sum', 'TOTL'),
('count', 'CONT'),
('mean', 'MEAN'),
('standard_deviation', 'STDV'),
('average', 'AVRG'),
('minimum', 'MIN '),
('maximum', 'MAX '),
('unlocked', 'NOLK'),
('shared_lock', 'SHLK'),
('exclusive_lock', 'EXLK'),
('false', 'fals'),
('sharing_hidden', 'mltH'),
('true', 'true'),
('single', 'rSgl'),
('repeated', 'rFxd'),
('guest', 'pGST'),
('before_', 'befo'),
('after_', 'afte'),
('beginning_', 'bgng'),
('end_', 'end '),
('replace', 'rplc'),
('index', 'indx'),
('named', 'name'),
('ID_', 'ID ')]
properties = \
[('best_type', 'pbst'),
('class_', 'pcls'),
('default_type', 'deft'),
('frontmost', 'pisf'),
('name', 'pnam'),
('version', 'vers'),
('bounds', 'pbnd'),
('visible', 'pvis'),
('index', 'pidx'),
('floating', 'isfl'),
('zoomable', 'iszm'),
('zoomed', 'pzum'),
('modal', 'pmod'),
('resizable', 'prsz'),
('has_close_box', 'hclb'),
('has_title_bar', 'ptit'),
('current_layout', 'pCLY'),
('current_record', 'pCRW'),
('current_table', 'pCTB'),
('current_cell', 'pCCL'),
('modified', 'imod'),
('multiuser', 'pMUr'),
('lock', 'pLCK'),
('access', 'pACS'),
('ID_', 'ID '),
('protection', 'ppro'),
('kind', 'pKND'),
('choices', 'pCHS'),
('formula', 'pfor'),
('nulls_OK', 'pNLS'),
('repeats', 'pRPT'),
('repeat_size', 'pRPS'),
('unique_value', 'pUNQ'),
('globalValue', 'pGLL'),
('cellValue', 'vlue'),
('omitted', 'pOMT'),
('enabled', 'enbl'),
('item_number', 'itmn'),
('checked', 'pCHK')]
elements = \
[('applications', 'capp'),
('windows', 'cwin'),
('documents', 'docu'),
('databases', 'cDB '),
('tables', 'cTBL'),
('FileMaker_scripts', 'cSCP'),
('layouts', 'ctbl'),
('fields', 'ccol'),
('records', 'crow'),
('cells', 'ccel'),
('repetitions', 'cREP'),
('requests', 'cRQT'),
('menu_items', 'cmen'),
('menus', 'cmnu')]
commands = \
[('getURL', 'GURLGURL', [('for_accounts', 'pACT')]),
('exists', 'coredoex', []),
('show', 'miscmvis', []),
('close', 'coreclos', []),
('redo', 'miscredo', []),
('find', 'FMPRFIND', []),
('quit', 'aevtquit', []),
('cut', 'misccut ', []),
('get_remote_URL', 'FMPROPRM', []),
('open', 'aevtodoc', [('with_passwords', 'pPAS'), ('for_Accounts', 'pACT')]),
('create',
'corecrel',
[('new', 'kocl'),
('at', 'insh'),
('with_data', 'data'),
('with_properties', 'prdt')]),
('get_data', 'coregetd', [('as_', 'rtyp')]),
('event_info', 'coregtei', [('in_', 'wrcd')]),
('print_',
'aevtpdoc',
[('with_password', 'pPAS'),
('for_Accounts', 'pACT'),
('from_page', 'StPg'),
('to_page', 'NdPg'),
('with_copies', 'Cpis')]),
('duplicate', 'coreclon', [('to', 'insh')]),
('save', 'coresave', []),
('data_size', 'coredsiz', [('as_', 'rtyp')]),
('go_to', 'FMPRGOTO', []),
('sort', 'DATASORT', [('by', 'SRTE'), ('in_order', 'SRTT')]),
('undo', 'miscundo', []),
('set_data', 'coresetd', [('to', 'data')]),
('copy', 'misccopy', []),
('paste', 'miscpast', []),
('count', 'corecnte', [('class_', 'kocl')]),
('do_script', 'miscdosc', []),
('class_info', 'coreqobj', [('in_', 'wrcd')]),
('do_menu', 'miscmenu', [('menu_named', 'menn')]),
('delete', 'coredelo', [])]
| bsd-2-clause |
PyBossa/pybossa | pybossa/auth/token.py | 1 | 1271 | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
class TokenAuth(object):
_specific_actions = []
@property
def specific_actions(self):
return self._specific_actions
def can(self, user, action, _, token=None):
action = ''.join(['_', action])
return getattr(self, action)(user, token)
def _create(self, user, token=None):
return False
def _read(self, user, token=None):
return not user.is_anonymous()
def _update(self, user, token):
return False
def _delete(self, user, token):
return False
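# Usage sketch (assuming a Flask-Login style user object): reads require an
# authenticated user, every other action is denied:
#
#     auth = TokenAuth()
#     auth.can(user, 'read', None)    # True unless user.is_anonymous()
#     auth.can(user, 'create', None)  # always False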
| agpl-3.0 |
tpsatish95/Python-Workshop | Python Environment Setup/Alternate/1. Python/1. Installer/Python-3.4.0(Linux)/Lib/contextlib.py | 83 | 11648 | """Utilities for with-statement contexts. See PEP 343."""
import sys
from collections import deque
from functools import wraps
__all__ = ["contextmanager", "closing", "ContextDecorator", "ExitStack",
"redirect_stdout", "suppress"]
class ContextDecorator(object):
"A base class or mixin that enables context managers to work as decorators."
def _recreate_cm(self):
"""Return a recreated instance of self.
Allows an otherwise one-shot context manager like
_GeneratorContextManager to support use as
a decorator via implicit recreation.
This is a private interface just for _GeneratorContextManager.
See issue #11647 for details.
"""
return self
def __call__(self, func):
@wraps(func)
def inner(*args, **kwds):
with self._recreate_cm():
return func(*args, **kwds)
return inner
class _GeneratorContextManager(ContextDecorator):
"""Helper for @contextmanager decorator."""
def __init__(self, func, *args, **kwds):
self.gen = func(*args, **kwds)
self.func, self.args, self.kwds = func, args, kwds
# Issue 19330: ensure context manager instances have good docstrings
doc = getattr(func, "__doc__", None)
if doc is None:
doc = type(self).__doc__
self.__doc__ = doc
# Unfortunately, this still doesn't provide good help output when
# inspecting the created context manager instances, since pydoc
# currently bypasses the instance docstring and shows the docstring
# for the class instead.
# See http://bugs.python.org/issue19404 for more details.
def _recreate_cm(self):
# _GCM instances are one-shot context managers, so the
# CM must be recreated each time a decorated function is
# called
return self.__class__(self.func, *self.args, **self.kwds)
def __enter__(self):
try:
return next(self.gen)
except StopIteration:
raise RuntimeError("generator didn't yield") from None
def __exit__(self, type, value, traceback):
if type is None:
try:
next(self.gen)
except StopIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
self.gen.throw(type, value, traceback)
raise RuntimeError("generator didn't stop after throw()")
except StopIteration as exc:
# Suppress the exception *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed
return exc is not value
except:
# only re-raise if it's *not* the exception that was
# passed to throw(), because __exit__() must not raise
# an exception unless __exit__() itself failed. But throw()
# has to raise the exception to signal propagation, so this
# fixes the impedance mismatch between the throw() protocol
# and the __exit__() protocol.
#
if sys.exc_info()[1] is not value:
raise
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
@wraps(func)
def helper(*args, **kwds):
return _GeneratorContextManager(func, *args, **kwds)
return helper
class closing(object):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
class redirect_stdout:
"""Context manager for temporarily redirecting stdout to another file
# How to send help() to stderr
with redirect_stdout(sys.stderr):
help(dir)
# How to write help() to a file
with open('help.txt', 'w') as f:
with redirect_stdout(f):
help(pow)
"""
def __init__(self, new_target):
self._new_target = new_target
# We use a list of old targets to make this CM re-entrant
self._old_targets = []
def __enter__(self):
self._old_targets.append(sys.stdout)
sys.stdout = self._new_target
return self._new_target
def __exit__(self, exctype, excinst, exctb):
sys.stdout = self._old_targets.pop()
class suppress:
"""Context manager to suppress specified exceptions
After the exception is suppressed, execution proceeds with the next
statement following the with statement.
with suppress(FileNotFoundError):
os.remove(somefile)
# Execution still resumes here if the file was already removed
"""
def __init__(self, *exceptions):
self._exceptions = exceptions
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
# Unlike isinstance and issubclass, CPython exception handling
# currently only looks at the concrete type hierarchy (ignoring
# the instance and subclass checking hooks). While Guido considers
# that a bug rather than a feature, it's a fairly hard one to fix
# due to various internal implementation details. suppress provides
# the simpler issubclass based semantics, rather than trying to
# exactly reproduce the limitations of the CPython interpreter.
#
# See http://bugs.python.org/issue12029 for more details
return exctype is not None and issubclass(exctype, self._exceptions)
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(object):
"""Context manager for dynamic management of a stack of exit callbacks
For example:
with ExitStack() as stack:
files = [stack.enter_context(open(fname)) for fname in filenames]
# All opened files will automatically be closed at the end of
# the with statement, even if attempts to open files later
# in the list raise an exception
"""
def __init__(self):
self._exit_callbacks = deque()
def pop_all(self):
"""Preserve the context stack by transferring it to a new instance"""
new_stack = type(self)()
new_stack._exit_callbacks = self._exit_callbacks
self._exit_callbacks = deque()
return new_stack
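    # Typical use (sketch): stage cleanups during setup, then transfer them
    # out so they outlive the with block only if setup succeeded:
    #
    #     with ExitStack() as stack:
    #         files = [stack.enter_context(open(fname)) for fname in filenames]
    #         close_files = stack.pop_all().close
    #     # files remain open here; call close_files() to release them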
def _push_cm_exit(self, cm, cm_exit):
"""Helper to correctly register callbacks to __exit__ methods"""
def _exit_wrapper(*exc_details):
return cm_exit(cm, *exc_details)
_exit_wrapper.__self__ = cm
self.push(_exit_wrapper)
def push(self, exit):
"""Registers a callback with the standard __exit__ method signature
Can suppress exceptions the same way __exit__ methods can.
Also accepts any object with an __exit__ method (registering a call
to the method instead of the object itself)
"""
# We use an unbound method rather than a bound method to follow
# the standard lookup behaviour for special methods
_cb_type = type(exit)
try:
exit_method = _cb_type.__exit__
except AttributeError:
# Not a context manager, so assume its a callable
self._exit_callbacks.append(exit)
else:
self._push_cm_exit(exit, exit_method)
return exit # Allow use as a decorator
def callback(self, callback, *args, **kwds):
"""Registers an arbitrary callback and arguments.
Cannot suppress exceptions.
"""
def _exit_wrapper(exc_type, exc, tb):
callback(*args, **kwds)
# We changed the signature, so using @wraps is not appropriate, but
# setting __wrapped__ may still help with introspection
_exit_wrapper.__wrapped__ = callback
self.push(_exit_wrapper)
return callback # Allow use as a decorator
def enter_context(self, cm):
"""Enters the supplied context manager
If successful, also pushes its __exit__ method as a callback and
returns the result of the __enter__ method.
"""
# We look up the special methods on the type to match the with statement
_cm_type = type(cm)
_exit = _cm_type.__exit__
result = _cm_type.__enter__(cm)
self._push_cm_exit(cm, _exit)
return result
def close(self):
"""Immediately unwind the context stack"""
self.__exit__(None, None, None)
def __enter__(self):
return self
def __exit__(self, *exc_details):
received_exc = exc_details[0] is not None
# We manipulate the exception state so it behaves as though
# we were actually nesting multiple with statements
frame_exc = sys.exc_info()[1]
def _fix_exception_context(new_exc, old_exc):
# Context may not be correct, so find the end of the chain
while 1:
exc_context = new_exc.__context__
if exc_context is old_exc:
# Context is already set correctly (see issue 20317)
return
if exc_context is None or exc_context is frame_exc:
break
new_exc = exc_context
# Change the end of the chain to point to the exception
# we expect it to reference
new_exc.__context__ = old_exc
# Callbacks are invoked in LIFO order to match the behaviour of
# nested context managers
suppressed_exc = False
pending_raise = False
while self._exit_callbacks:
cb = self._exit_callbacks.pop()
try:
if cb(*exc_details):
suppressed_exc = True
pending_raise = False
exc_details = (None, None, None)
except:
new_exc_details = sys.exc_info()
# simulate the stack of exceptions by setting the context
_fix_exception_context(new_exc_details[1], exc_details[1])
pending_raise = True
exc_details = new_exc_details
if pending_raise:
try:
# bare "raise exc_details[1]" replaces our carefully
# set-up context
fixed_ctx = exc_details[1].__context__
raise exc_details[1]
except BaseException:
exc_details[1].__context__ = fixed_ctx
raise
return received_exc and suppressed_exc
| apache-2.0 |
cwayne18/ActivityTracker | py/gpxpy/parser.py | 3 | 6507 | # -*- coding: utf-8 -*-
# Copyright 2011 Tomo Krajina
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import re as mod_re
import logging as mod_logging
import datetime as mod_datetime
import xml.dom.minidom as mod_minidom
try:
import lxml.etree as mod_etree
except:
mod_etree = None
pass # LXML not available
from . import gpx as mod_gpx
from . import utils as mod_utils
from . import gpxfield as mod_gpxfield
class XMLParser:
"""
Used when lxml is not available. Uses standard minidom.
"""
def __init__(self, xml):
self.xml = xml
self.dom = mod_minidom.parseString(xml)
def get_first_child(self, node=None, name=None):
# TODO: Remove find_first_node from utils!
if not node:
node = self.dom
children = node.childNodes
if not children:
return None
if not name:
return children[0]
for tmp_node in children:
if tmp_node.nodeName == name:
return tmp_node
return None
def get_node_name(self, node):
if not node:
return None
return node.nodeName
def get_children(self, node=None):
if not node:
node = self.dom
return list(filter(lambda node : node.nodeType == node.ELEMENT_NODE, node.childNodes))
def get_node_data(self, node):
if node is None:
return None
child_nodes = node.childNodes
if not child_nodes or len(child_nodes) == 0:
return None
return child_nodes[0].nodeValue
def get_node_attribute(self, node, attribute):
if (not hasattr(node, 'attributes')) or (not node.attributes):
return None
if attribute in node.attributes.keys():
return node.attributes[attribute].nodeValue
return None
class LXMLParser:
"""
Used when lxml is available.
"""
def __init__(self, xml):
if not mod_etree:
raise Exception('Cannot use LXMLParser without lxml installed')
if mod_utils.PYTHON_VERSION[0] == '3':
# In python 3 all strings are unicode and for some reason lxml
# don't like unicode strings with XMLs declared as UTF-8:
self.xml = xml.encode('utf-8')
else:
self.xml = xml
self.dom = mod_etree.XML(self.xml)
# get the namespace
self.ns = self.dom.nsmap.get(None)
def get_first_child(self, node=None, name=None):
if node is None:
if name:
if self.get_node_name(self.dom) == name:
return self.dom
return self.dom
children = node.getchildren()
if not children:
return None
if name:
for node in children:
if self.get_node_name(node) == name:
return node
return None
return children[0]
def get_node_name(self, node):
if '}' in node.tag:
return node.tag.split('}')[1]
return node.tag
def get_children(self, node=None):
if node is None:
node = self.dom
return node.getchildren()
def get_node_data(self, node):
if node is None:
return None
return node.text
def get_node_attribute(self, node, attribute):
if node is None:
return None
return node.attrib.get(attribute)
class GPXParser:
def __init__(self, xml_or_file=None, parser=None):
"""
Parser may be lxml of minidom. If you set to None then lxml will be used if installed
otherwise minidom.
"""
self.init(xml_or_file)
self.gpx = mod_gpx.GPX()
self.xml_parser_type = parser
self.xml_parser = None
def init(self, xml_or_file):
text = xml_or_file.read() if hasattr(xml_or_file, 'read') else xml_or_file
self.xml = mod_utils.make_str(text)
self.gpx = mod_gpx.GPX()
def parse(self):
"""
Parses the XML file and returns a GPX object.
It will throw GPXXMLSyntaxException if the XML file is invalid or
GPXException if the XML file is valid but something is wrong with the
GPX data.
"""
try:
if self.xml_parser_type is None:
if mod_etree:
self.xml_parser = LXMLParser(self.xml)
else:
self.xml_parser = XMLParser(self.xml)
elif self.xml_parser_type == 'lxml':
self.xml_parser = LXMLParser(self.xml)
elif self.xml_parser_type == 'minidom':
self.xml_parser = XMLParser(self.xml)
else:
raise mod_gpx.GPXException('Invalid parser type: %s' % self.xml_parser_type)
self.__parse_dom()
return self.gpx
except Exception as e:
# The exception here can be a lxml or minidom exception.
mod_logging.debug('Error in:\n%s\n-----------\n' % self.xml)
mod_logging.exception(e)
# The library should work in the same way regardless of the
# underlying XML parser that's why the exception thrown
# here is GPXXMLSyntaxException (instead of simply throwing the
# original minidom or lxml exception e).
#
# But, if the user need the original exception (lxml or minidom)
# it is available with GPXXMLSyntaxException.original_exception:
raise mod_gpx.GPXXMLSyntaxException('Error parsing XML: %s' % str(e), e)
def __parse_dom(self):
node = self.xml_parser.get_first_child(name='gpx')
if node is None:
raise mod_gpx.GPXException('Document must have a `gpx` root node.')
version = self.xml_parser.get_node_attribute(node, 'version')
mod_gpxfield.gpx_fields_from_xml(self.gpx, self.xml_parser, node, version)
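# A minimal usage sketch (an assumption for illustration: the XML snippet and
# the track attribute access below are hypothetical, and the relative imports
# above must resolve as they do inside gpxpy):
if __name__ == '__main__':
    sample_xml = '<gpx version="1.1"><trk><name>demo</name></trk></gpx>'
    try:
        parsed = GPXParser(sample_xml, parser='minidom').parse()
        print(parsed.tracks[0].name)
    except mod_gpx.GPXXMLSyntaxException as error:
        # The original minidom/lxml exception is kept on the wrapper:
        print(error.original_exception)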
| gpl-3.0 |
amenonsen/ansible | lib/ansible/modules/remote_management/redfish/idrac_redfish_command.py | 12 | 5893 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Dell EMC Inc.
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: idrac_redfish_command
version_added: "2.8"
short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs
description:
- Builds Redfish URIs locally and sends them to remote OOB controllers to
perform an action.
  - For use with Dell iDRAC operations that require Redfish OEM extensions.
options:
category:
required: true
description:
- Category to execute on OOB controller
type: str
command:
required: true
description:
- List of commands to execute on OOB controller
type: list
baseuri:
required: true
description:
- Base URI of OOB controller
type: str
username:
required: true
description:
- User for authentication with OOB controller
type: str
password:
required: true
description:
- Password for authentication with OOB controller
type: str
timeout:
description:
- Timeout in seconds for URL requests to OOB controller
default: 10
type: int
version_added: '2.8'
author: "Jose Delarosa (@jose-delarosa)"
'''
EXAMPLES = '''
- name: Create BIOS configuration job (schedule BIOS setting update)
idrac_redfish_command:
category: Systems
command: CreateBiosConfigJob
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
'''
RETURN = '''
msg:
description: Message with action result or error description
returned: always
type: str
sample: "Action was successful"
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.redfish_utils import RedfishUtils
from ansible.module_utils._text import to_native
class IdracRedfishUtils(RedfishUtils):
def create_bios_config_job(self):
result = {}
key = "Bios"
jobs = "Jobs"
# Search for 'key' entry and extract URI from it
response = self.get_request(self.root_uri + self.systems_uris[0])
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
if key not in data:
return {'ret': False, 'msg': "Key %s not found" % key}
bios_uri = data[key]["@odata.id"]
# Extract proper URI
response = self.get_request(self.root_uri + bios_uri)
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][
"@odata.id"]
payload = {"TargetSettingsURI": set_bios_attr_uri}
response = self.post_request(
self.root_uri + self.manager_uri + "/" + jobs, payload)
if response['ret'] is False:
return response
response_output = response['resp'].__dict__
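        # The new job's URI is returned in the Location response header;
        # keep only the iDRAC job id token (JID_...).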
job_id = response_output["headers"]["Location"]
job_id = re.search("JID_.+", job_id).group()
# Currently not passing job_id back to user but patch is coming
return {'ret': True, 'msg': "Config job %s created" % job_id}
CATEGORY_COMMANDS_ALL = {
"Systems": ["CreateBiosConfigJob"],
"Accounts": [],
"Manager": []
}
def main():
result = {}
module = AnsibleModule(
argument_spec=dict(
category=dict(required=True),
command=dict(required=True, type='list'),
baseuri=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
timeout=dict(type='int', default=10)
),
supports_check_mode=False
)
category = module.params['category']
command_list = module.params['command']
# admin credentials used for authentication
creds = {'user': module.params['username'],
'pswd': module.params['password']}
# timeout
timeout = module.params['timeout']
# Build root URI
root_uri = "https://" + module.params['baseuri']
rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)
# Check that Category is valid
if category not in CATEGORY_COMMANDS_ALL:
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
# Check that all commands are valid
for cmd in command_list:
# Fail if even one command given is invalid
if cmd not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
# Organize by Categories / Commands
if category == "Systems":
# execute only if we find a System resource
result = rf_utils._find_systems_resource()
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
for command in command_list:
if command == "CreateBiosConfigJob":
# execute only if we find a Managers resource
result = rf_utils._find_managers_resource()
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
result = rf_utils.create_bios_config_job()
# Return data back or fail with proper message
if result['ret'] is True:
del result['ret']
module.exit_json(changed=True, msg='Action was successful')
else:
module.fail_json(msg=to_native(result['msg']))
if __name__ == '__main__':
main()
| gpl-3.0 |
ekumenlabs/terminus | terminus/generators/rndf_id_mapper.py | 1 | 2695 | """
Copyright (C) 2017 Open Source Robotics Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from city_visitor import CityVisitor
from models.polyline_geometry import PolylineGeometry
class RNDFIdMapper(CityVisitor):
"""Simple city visitor that generates the RNDF ids for segments,
lanes and waypoints. Ids and objects are stored in two dictionaries,
    so we can later perform lookups in either direction."""
# Note: For the time being we treat streets and trunks in the same way,
# hence generating a single lane for any of them. This will change in the
# future, when we properly support multi-lanes trunks.
def run(self):
self.segment_id = 0
self.waypoint_id = 0
self.lane_id = 0
self.object_to_id_level_1 = {}
self.object_to_id_level_2 = {}
self.id_to_object = {}
super(RNDFIdMapper, self).run()
def id_for(self, object):
try:
return self.object_to_id_level_1[id(object)]
except KeyError:
return self.object_to_id_level_2[object]
def object_for(self, id):
return self.id_to_object[id]
def map_road(self, road):
self.segment_id = self.segment_id + 1
self.lane_id = 0
self._register(str(self.segment_id), road)
def start_street(self, street):
self.map_road(street)
def start_trunk(self, trunk):
self.map_road(trunk)
def start_lane(self, lane):
self.lane_id = self.lane_id + 1
rndf_lane_id = str(self.segment_id) + '.' + str(self.lane_id)
self._register(rndf_lane_id, lane)
self.waypoint_id = 0
for waypoint in lane.waypoints_for(PolylineGeometry):
self.waypoint_id = self.waypoint_id + 1
rndf_waypoint_id = rndf_lane_id + '.' + str(self.waypoint_id)
self._register(rndf_waypoint_id, waypoint)
def _register(self, rndf_id, object):
"""We do some caching by id, to avoid computing hashes if they are
expensive, but keep the hash-based dict as a fallback"""
self.object_to_id_level_1[id(object)] = rndf_id
self.object_to_id_level_2[object] = rndf_id
self.id_to_object[rndf_id] = object
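# A minimal sketch of the two-way lookup tables (an assumption for
# illustration: we drive _register() directly instead of building a full
# terminus City and calling run()):
if __name__ == '__main__':
    mapper = RNDFIdMapper.__new__(RNDFIdMapper)  # bypass CityVisitor.__init__
    mapper.object_to_id_level_1 = {}
    mapper.object_to_id_level_2 = {}
    mapper.id_to_object = {}
    waypoint = object()
    mapper._register('1.1.1', waypoint)
    assert mapper.id_for(waypoint) == '1.1.1'
    assert mapper.object_for('1.1.1') is waypoint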
| apache-2.0 |
Sveder/letsencrypt | letshelp-letsencrypt/docs/conf.py | 17 | 10359 | # -*- coding: utf-8 -*-
#
# letshelp-letsencrypt documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 18 13:40:19 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
here = os.path.abspath(os.path.dirname(__file__))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join(here, '..')))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
autodoc_member_order = 'bysource'
autodoc_default_flags = ['show-inheritance', 'private-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'letshelp-letsencrypt'
copyright = u'2014-2015, Let\'s Encrypt Project'
author = u'Let\'s Encrypt Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0'
# The full version, including alpha/beta/rc tags.
release = '0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# http://docs.readthedocs.org/en/latest/theme.html#how-do-i-use-this-locally-and-on-read-the-docs
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'letshelp-letsencryptdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'letshelp-letsencrypt.tex', u'letshelp-letsencrypt Documentation',
u'Let\'s Encrypt Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'letshelp-letsencrypt', u'letshelp-letsencrypt Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'letshelp-letsencrypt', u'letshelp-letsencrypt Documentation',
author, 'letshelp-letsencrypt', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'acme': ('https://acme-python.readthedocs.org/en/latest/', None),
'letsencrypt': ('https://letsencrypt.readthedocs.org/en/latest/', None),
}
| apache-2.0 |
bala4901/odoo | addons/account_bank_statement_extensions/__openerp__.py | 378 | 2357 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Bank Statement Extensions to Support e-banking',
'version': '0.3',
'license': 'AGPL-3',
'author': 'Noviat',
'category': 'Generic Modules/Accounting',
'description': '''
Module that extends the standard account_bank_statement_line object for improved e-banking support.
===================================================================================================
This module adds:
-----------------
- valuta date
- batch payments
- traceability of changes to bank statement lines
- bank statement line views
- bank statements balances report
- performance improvements for digital import of bank statement (via
'ebanking_import' context flag)
- name_search on res.partner.bank enhanced to allow search on bank
and iban account numbers
''',
'depends': ['account'],
'demo': [],
'data' : [
'security/ir.model.access.csv',
'account_bank_statement_view.xml',
'account_bank_statement_report.xml',
'wizard/confirm_statement_line_wizard.xml',
'wizard/cancel_statement_line_wizard.xml',
'data/account_bank_statement_extensions_data.xml',
'views/report_bankstatementbalance.xml',
],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wkia/kodi-addon-repo | plugin.audio.openlast/default.py | 1 | 6672 | # -*- coding: utf-8 -*-
import os
import sys
import urllib
import urlparse
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
if sys.version_info < (2, 7):
import simplejson as json
else:
import json
from logging import log
from util import build_url
__addon__ = xbmcaddon.Addon()
#__addonid__ = __addon__.getAddonInfo('id')
#__settings__ = xbmcaddon.Addon(id='xbmc-vk.svoka.com')
#__language__ = __settings__.getLocalizedString
#LANGUAGE = __addon__.getLocalizedString
ADDONVERSION = __addon__.getAddonInfo('version')
CWD = __addon__.getAddonInfo('path').decode("utf-8")
log('start -----------------------------------------------------')
log('script version %s started' % ADDONVERSION)
#xbmc.log(str(sys.argv))
addonUrl = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
#my_addon = xbmcaddon.Addon()
# lastfmUser = my_addon.getSetting('lastfm_username')
xbmcplugin.setContent(addon_handle, 'audio')
lastfmApi = 'http://ws.audioscrobbler.com/2.0/'
lastfmApiKey = '47608ece2138b2edae9538f83f703457' # TODO use Openlast key
lastfmAddon = None
lastfmUser = ''
try:
lastfmAddon = xbmcaddon.Addon('service.scrobbler.lastfm')
lastfmUser = lastfmAddon.getSetting('lastfmuser')
except RuntimeError:
pass
#xbmc.log(str(args))
action = args.get('action', None)
folder = args.get('folder', None)
#xbmc.log('openlast: folder=' + str(folder)) #, xbmc.LOGDEBUG)
#xbmc.log('openlast: action=' + str(action)) #, xbmc.LOGDEBUG)
if folder is None:
url = build_url(addonUrl, {'folder': 'similarArtist'})
li = xbmcgui.ListItem('Similar artist radio', iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
if '' != lastfmUser:
url = build_url(addonUrl, {'folder': 'lastfm', 'username': lastfmUser})
# xbmc.log(url)
li = xbmcgui.ListItem('Personal radio for Last.fm user: ' + lastfmUser, iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
url = build_url(addonUrl, {'folder': 'lastfm'})
li = xbmcgui.ListItem('Personal radio for Last.fm user...', iconImage='DefaultFolder.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True)
xbmcplugin.endOfDirectory(addon_handle)
elif folder[0] == 'lastfm':
username = ''
    if args.get('username') is not None:
username = args.get('username')[0]
playcount = 0
    if args.get('playcount') is not None:
playcount = int(args.get('playcount')[0])
if username == '':
user_keyboard = xbmc.Keyboard()
user_keyboard.setHeading('Last.FM user name') # __language__(30001))
user_keyboard.setHiddenInput(False)
user_keyboard.setDefault(lastfmUser)
user_keyboard.doModal()
if user_keyboard.isConfirmed():
username = user_keyboard.getText()
else:
raise Exception("Login input was cancelled.")
if action is None:
url = build_url(lastfmApi, {'method': 'user.getInfo', 'user': username,
'format': 'json', 'api_key': lastfmApiKey})
reply = urllib.urlopen(url)
resp = json.load(reply)
if "error" in resp:
raise Exception("Error! DATA: " + str(resp))
else:
# xbmc.log(str(resp))
pass
playcount = int(resp['user']['playcount'])
img = resp['user']['image'][2]['#text']
if '' == img:
img = 'DefaultAudio.png'
url = build_url(addonUrl, {'folder': folder[0], 'action': 'lovedTracks', 'username': username})
li = xbmcgui.ListItem('Listen to loved tracks', iconImage=img)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
url = build_url(addonUrl, {'folder': folder[0], 'action': 'topTracks', 'username': username, 'playcount': playcount})
li = xbmcgui.ListItem('Listen to track library', iconImage=img)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
url = build_url(addonUrl, {'folder': folder[0], 'action': 'topArtists', 'username': username, 'playcount': playcount})
li = xbmcgui.ListItem('Listen to artist library', iconImage=img)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
url = build_url(addonUrl, {'folder': folder[0], 'action': 'syncLibrary', 'username': username, 'playcount': playcount})
        li = xbmcgui.ListItem('[EXPERIMENTAL] Synchronize library to folder', iconImage=img)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
xbmcplugin.endOfDirectory(addon_handle)
elif action[0] == 'lovedTracks':
script = os.path.join(CWD, "run_app.py")
log('running script %s...' % script)
xbmc.executebuiltin('XBMC.RunScript(%s, %s, %s)' % (script, action[0], username))
elif action[0] == 'topTracks':
script = os.path.join(CWD, "run_app.py")
log('running script %s...' % script)
xbmc.executebuiltin('XBMC.RunScript(%s, %s, %s, %s)' % (script, action[0], username, playcount))
elif action[0] == 'topArtists':
script = os.path.join(CWD, "run_app.py")
log('running script %s...' % script)
xbmc.executebuiltin('XBMC.RunScript(%s, %s, %s, %s)' % (script, action[0], username, playcount))
elif action[0] == 'syncLibrary':
script = os.path.join(CWD, "run_app.py")
log('running script %s...' % script)
xbmc.executebuiltin('XBMC.RunScript(%s, %s, %s)' % (script, action[0], username))
elif folder[0] == 'similarArtist':
if action is None:
url = build_url(lastfmApi, {'method': 'chart.getTopArtists',
'format': 'json', 'api_key': lastfmApiKey})
reply = urllib.urlopen(url)
resp = json.load(reply)
if "error" in resp:
raise Exception("Error! DATA: " + str(resp))
else:
#log(str(resp))
pass
for a in resp['artists']['artist']:
url = build_url(addonUrl, {'folder': folder[0], 'action': a['name'].encode('utf-8')})
li = xbmcgui.ListItem(a['name'])
li.setArt({'icon': a['image'][2]['#text'], 'thumb': a['image'][2]['#text'], 'fanart': a['image'][4]['#text']})
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=False)
pass
xbmcplugin.endOfDirectory(addon_handle)
log('end -----------------------------------------------------')
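# For reference, a hedged sketch of what util.build_url is assumed to look
# like (the real helper lives in util.py; this mirrors the usual Kodi plugin
# routing pattern):
#
#   def build_url(base_url, query):
#       return base_url + '?' + urllib.urlencode(query)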
| gpl-2.0 |
zchking/odoo | addons/l10n_in_hr_payroll/wizard/__init__.py | 430 | 1110 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_salary_employee_bymonth
import hr_yearly_salary_detail
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: | agpl-3.0 |
jonathan-beard/edx-platform | cms/djangoapps/contentstore/views/export_git.py | 146 | 1723 | """
This views handles exporting the course xml to a git repository if
the giturl attribute is set.
"""
import logging
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.views.decorators.csrf import ensure_csrf_cookie
from django.utils.translation import ugettext as _
from student.auth import has_course_author_access
import contentstore.git_export_utils as git_export_utils
from edxmako.shortcuts import render_to_response
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
log = logging.getLogger(__name__)
@ensure_csrf_cookie
@login_required
def export_git(request, course_key_string):
"""
This method serves up the 'Export to Git' page
"""
course_key = CourseKey.from_string(course_key_string)
if not has_course_author_access(request.user, course_key):
raise PermissionDenied()
course_module = modulestore().get_course(course_key)
failed = False
log.debug('export_git course_module=%s', course_module)
msg = ""
if 'action' in request.GET and course_module.giturl:
if request.GET['action'] == 'push':
try:
git_export_utils.export_to_git(
course_module.id,
course_module.giturl,
request.user,
)
msg = _('Course successfully exported to git repository')
except git_export_utils.GitExportError as ex:
failed = True
msg = unicode(ex)
return render_to_response('export_git.html', {
'context_course': course_module,
'msg': msg,
'failed': failed,
})
| agpl-3.0 |
JamesRaynor67/mptcp_with_machine_learning | src/config-store/bindings/modulegen__gcc_LP64.py | 4 | 68756 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
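# pybindgen calls handle_error() when it fails to wrap a symbol; returning
# True tells pybindgen the error was handled, so the offending wrapper is
# skipped instead of aborting the whole module generation.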
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.config_store', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## file-config.h (module 'config-store'): ns3::FileConfig [class]
module.add_class('FileConfig', allow_subclassing=True)
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## file-config.h (module 'config-store'): ns3::NoneFileConfig [class]
module.add_class('NoneFileConfig', parent=root_module['ns3::FileConfig'])
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## config-store.h (module 'config-store'): ns3::ConfigStore [class]
module.add_class('ConfigStore', parent=root_module['ns3::ObjectBase'])
## config-store.h (module 'config-store'): ns3::ConfigStore::Mode [enumeration]
module.add_enum('Mode', ['LOAD', 'SAVE', 'NONE'], outer_class=root_module['ns3::ConfigStore'])
## config-store.h (module 'config-store'): ns3::ConfigStore::FileFormat [enumeration]
module.add_enum('FileFormat', ['XML', 'RAW_TEXT'], outer_class=root_module['ns3::ConfigStore'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3FileConfig_methods(root_module, root_module['ns3::FileConfig'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3NoneFileConfig_methods(root_module, root_module['ns3::NoneFileConfig'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3ConfigStore_methods(root_module, root_module['ns3::ConfigStore'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3FileConfig_methods(root_module, cls):
## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig() [constructor]
cls.add_constructor([])
## file-config.h (module 'config-store'): ns3::FileConfig::FileConfig(ns3::FileConfig const & arg0) [copy constructor]
cls.add_constructor([param('ns3::FileConfig const &', 'arg0')])
## file-config.h (module 'config-store'): void ns3::FileConfig::Attributes() [member function]
cls.add_method('Attributes',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::Default() [member function]
cls.add_method('Default',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::Global() [member function]
cls.add_method('Global',
'void',
[],
is_pure_virtual=True, is_virtual=True)
## file-config.h (module 'config-store'): void ns3::FileConfig::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
def register_Ns3NoneFileConfig_methods(root_module, cls):
## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig(ns3::NoneFileConfig const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NoneFileConfig const &', 'arg0')])
## file-config.h (module 'config-store'): ns3::NoneFileConfig::NoneFileConfig() [constructor]
cls.add_constructor([])
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Attributes() [member function]
cls.add_method('Attributes',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Default() [member function]
cls.add_method('Default',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::Global() [member function]
cls.add_method('Global',
'void',
[],
is_virtual=True)
## file-config.h (module 'config-store'): void ns3::NoneFileConfig::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')],
is_virtual=True)
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3ConfigStore_methods(root_module, cls):
## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore(ns3::ConfigStore const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ConfigStore const &', 'arg0')])
## config-store.h (module 'config-store'): ns3::ConfigStore::ConfigStore() [constructor]
cls.add_constructor([])
## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureAttributes() [member function]
cls.add_method('ConfigureAttributes',
'void',
[])
## config-store.h (module 'config-store'): void ns3::ConfigStore::ConfigureDefaults() [member function]
cls.add_method('ConfigureDefaults',
'void',
[])
## config-store.h (module 'config-store'): ns3::TypeId ns3::ConfigStore::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## config-store.h (module 'config-store'): static ns3::TypeId ns3::ConfigStore::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFileFormat(ns3::ConfigStore::FileFormat format) [member function]
cls.add_method('SetFileFormat',
'void',
[param('ns3::ConfigStore::FileFormat', 'format')])
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetFilename(std::string filename) [member function]
cls.add_method('SetFilename',
'void',
[param('std::string', 'filename')])
## config-store.h (module 'config-store'): void ns3::ConfigStore::SetMode(ns3::ConfigStore::Mode mode) [member function]
cls.add_method('SetMode',
'void',
[param('ns3::ConfigStore::Mode', 'mode')])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
| gpl-2.0 |
yan12125/youtube-dl | youtube_dl/extractor/tvland.py | 14 | 1468 | # coding: utf-8
from __future__ import unicode_literals
from .spike import ParamountNetworkIE
class TVLandIE(ParamountNetworkIE):
IE_NAME = 'tvland.com'
_VALID_URL = r'https?://(?:www\.)?tvland\.com/(?:video-clips|(?:full-)?episodes)/(?P<id>[^/?#.]+)'
_FEED_URL = 'http://www.tvland.com/feeds/mrss/'
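    # Extraction itself lives in ParamountNetworkIE (an MTVServices-based
    # extractor); only the URL pattern and the mRSS feed base above differ
    # per site (assumption from the shared base class, not re-verified here).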
_TESTS = [{
        # Geo-restricted. Without a proxy the metadata is still there; with a
        # proxy it redirects to http://m.tvland.com/app/
'url': 'https://www.tvland.com/episodes/s04pzf/everybody-loves-raymond-the-dog-season-1-ep-19',
'info_dict': {
'description': 'md5:84928e7a8ad6649371fbf5da5e1ad75a',
'title': 'The Dog',
},
'playlist_mincount': 5,
}, {
'url': 'https://www.tvland.com/video-clips/4n87f2/younger-a-first-look-at-younger-season-6',
'md5': 'e2c6389401cf485df26c79c247b08713',
'info_dict': {
'id': '891f7d3c-5b5b-4753-b879-b7ba1a601757',
'ext': 'mp4',
'title': 'Younger|April 30, 2019|6|NO-EPISODE#|A First Look at Younger Season 6',
'description': 'md5:595ea74578d3a888ae878dfd1c7d4ab2',
'upload_date': '20190430',
'timestamp': 1556658000,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.tvland.com/full-episodes/iu0hz6/younger-a-kiss-is-just-a-kiss-season-3-ep-301',
'only_matching': True,
}]
| unlicense |
aacole/ursula-monitoring | sensu/plugins/metrics-nova-state.py | 2 | 1353 | #!/usr/bin/env python
# #RED
from argparse import ArgumentParser
import socket
import time
import shade
DEFAULT_SCHEME = '{}.nova.states'.format(socket.gethostname())
def output_metric(name, value):
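    # Emits one Graphite plaintext-style line: "<name> <value> <unix ts>",
    # tab-separated here (Graphite parsers generally split on any whitespace).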
print '{}\t{}\t{}'.format(name, value, int(time.time()))
def main():
parser = ArgumentParser()
parser.add_argument('-S', '--service-type', default='compute')
parser.add_argument('-s', '--scheme', default=DEFAULT_SCHEME)
args = parser.parse_args()
cloud = shade.openstack_cloud()
    servers = cloud.nova_client.servers.list(search_opts={'all_tenants': True})
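    # 'all_tenants': True asks Nova for servers across every project,
    # which requires an admin-scoped token.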
# http://docs.openstack.org/api/openstack-compute/2/content/List_Servers-d1e2078.html
states = {
'ACTIVE': 0,
'BUILD': 0,
'DELETED': 0,
'ERROR': 0,
'HARD_REBOOT': 0,
'PASSWORD': 0,
'REBOOT': 0,
'REBUILD': 0,
'RESCUE': 0,
'RESIZE': 0,
'REVERT_RESIZE': 0,
'SHUTOFF': 0,
'SUSPENDED': 0,
'UNKNOWN': 0,
'VERIFY_RESIZE': 0,
}
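    # Tally servers per status; statuses missing from the map above
    # (e.g. states added in newer Nova releases) are added on the fly.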
for server in servers:
if server.status not in states:
states[server.status] = 0
states[server.status] += 1
for state, count in states.iteritems():
output_metric('{}.{}'.format(args.scheme, state.lower()), count)
if __name__ == '__main__':
main()
| apache-2.0 |
takeshineshiro/django | tests/forms_tests/tests/test_media.py | 169 | 45896 | # -*- coding: utf-8 -*-
from django.forms import CharField, Form, Media, MultiWidget, TextInput
from django.template import Context, Template
from django.test import SimpleTestCase, override_settings
from django.utils.encoding import force_text
@override_settings(
STATIC_URL=None,
MEDIA_URL='http://media.example.com/media/',
)
class FormsMediaTestCase(SimpleTestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(css={'all': ('path/to/css1', '/path/to/css2')}, js=('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3'))
self.assertEqual(str(m), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
class Foo:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(str(m3), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(str(w1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Media objects can be interrogated by media type
self.assertEqual(str(w1.media['css']), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />""")
self.assertEqual(str(w1.media['js']), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(str(w1.media + w2.media + w3.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Check that media addition hasn't affected the original objects
self.assertEqual(str(w1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(str(w6.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(str(w7.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w8 = MyWidget8()
self.assertEqual(str(w8.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_from_property(self):
        # If a widget extends another but defines media, it extends the parent
        # widget's media, even if the parent defined media using a property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(str(w9.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
# A widget can disable media inheritance by specifying 'extend=False'
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w11 = MyWidget11()
self.assertEqual(str(w11.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w12 = MyWidget12()
self.assertEqual(str(w12.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1', '/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1', '/path/to/js4')
multimedia = MultimediaWidget()
self.assertEqual(str(multimedia.media), """<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(str(mymulti.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(str(f1.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(str(f1.media + f2.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Forms can also define media, following the same rules as widgets.
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(str(f3.media), """<link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>""")
# Media works in templates
self.assertEqual(Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script><link href="http://media.example.com/media/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />""")
def test_html_safe(self):
media = Media(css={'all': ['/path/to/css']}, js=['/path/to/js'])
self.assertTrue(hasattr(Media, '__html__'))
self.assertEqual(force_text(media), media.__html__())
@override_settings(
STATIC_URL='http://media.example.com/static/',
MEDIA_URL='http://media.example.com/media/',
)
class StaticFormsMediaTestCase(SimpleTestCase):
"""Tests for the media handling on widgets and forms"""
def test_construction(self):
# Check construction of media objects
m = Media(css={'all': ('path/to/css1', '/path/to/css2')}, js=('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3'))
self.assertEqual(str(m), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
class Foo:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
m3 = Media(Foo)
self.assertEqual(str(m3), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# A widget can exist without a media definition
class MyWidget(TextInput):
pass
w = MyWidget()
self.assertEqual(str(w.media), '')
def test_media_dsl(self):
###############################################################
# DSL Class-based media definitions
###############################################################
# A widget can define media if it needs to.
# Any absolute path will be preserved; relative paths are combined
# with the value of settings.MEDIA_URL
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
w1 = MyWidget1()
self.assertEqual(str(w1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Media objects can be interrogated by media type
self.assertEqual(str(w1.media['css']), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />""")
self.assertEqual(str(w1.media['js']), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
def test_combine_media(self):
# Media objects can be combined. Any given media resource will appear only
# once. Duplicated media definitions are ignored.
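        # The combined result keeps first-seen order and drops later
        # duplicates: in the expected output below, js1 appears only once
        # even though all three widgets declare it.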
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w1 = MyWidget1()
w2 = MyWidget2()
w3 = MyWidget3()
self.assertEqual(str(w1.media + w2.media + w3.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Check that media addition hasn't affected the original objects
self.assertEqual(str(w1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# Regression check for #12879: specifying the same CSS or JS file
# multiple times in a single Media instance should result in that file
# only being included once.
class MyWidget4(TextInput):
class Media:
css = {'all': ('/path/to/css1', '/path/to/css1')}
js = ('/path/to/js1', '/path/to/js1')
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>""")
def test_media_property(self):
###############################################################
# Property-based media definitions
###############################################################
# Widget media can be defined as a property
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
w4 = MyWidget4()
self.assertEqual(str(w4.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>""")
# Media properties can reference the media of their parents
class MyWidget5(MyWidget4):
def _media(self):
return super(MyWidget5, self).media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w5 = MyWidget5()
self.assertEqual(str(w5.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_property_parent_references(self):
# Media properties can reference the media of their parents,
# even if the parent media was defined using a class
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget6(MyWidget1):
def _media(self):
return super(MyWidget6, self).media + Media(css={'all': ('/other/path',)}, js=('/other/js',))
media = property(_media)
w6 = MyWidget6()
self.assertEqual(str(w6.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/other/js"></script>""")
def test_media_inheritance(self):
###############################################################
# Inheritance of media
###############################################################
# If a widget extends another but provides no media definition, it inherits the parent widget's media
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget7(MyWidget1):
pass
w7 = MyWidget7()
self.assertEqual(str(w7.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>""")
# If a widget extends another but defines media, it extends the parent widget's media by default
class MyWidget8(MyWidget1):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w8 = MyWidget8()
self.assertEqual(str(w8.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_from_property(self):
        # If a widget extends another but defines media, it extends the
        # parent widget's media, even if the parent defined media using a
        # property.
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget4(TextInput):
def _media(self):
return Media(css={'all': ('/some/path',)}, js=('/some/js',))
media = property(_media)
class MyWidget9(MyWidget4):
class Media:
css = {
'all': ('/other/path',)
}
js = ('/other/js',)
w9 = MyWidget9()
self.assertEqual(str(w9.media), """<link href="/some/path" type="text/css" media="all" rel="stylesheet" />
<link href="/other/path" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/some/js"></script>
<script type="text/javascript" src="/other/js"></script>""")
# A widget can disable media inheritance by specifying 'extend=False'
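        # extend=False only drops the inherited definitions; the child's own
        # relative path 'path/to/css1' is still combined with STATIC_URL in
        # the expected output below.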
class MyWidget10(MyWidget1):
class Media:
extend = False
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w10 = MyWidget10()
self.assertEqual(str(w10.media), """<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_extends(self):
# A widget can explicitly enable full media inheritance by specifying 'extend=True'
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget11(MyWidget1):
class Media:
extend = True
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w11 = MyWidget11()
self.assertEqual(str(w11.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_media_inheritance_single_type(self):
# A widget can enable inheritance of one media type by specifying extend as a tuple
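        # Only the media types named in the tuple are inherited: the CSS
        # below is merged with the parent's, while the JS list replaces it
        # (js2 and js3 from MyWidget1 are absent from the expected output).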
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget12(MyWidget1):
class Media:
extend = ('css',)
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
w12 = MyWidget12()
self.assertEqual(str(w12.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_media(self):
###############################################################
# Multi-media handling for CSS
###############################################################
# A widget can define CSS media for multiple output media types
class MultimediaWidget(TextInput):
class Media:
css = {
'screen, print': ('/file1', '/file2'),
'screen': ('/file3',),
'print': ('/file4',)
}
js = ('/path/to/js1', '/path/to/js4')
multimedia = MultimediaWidget()
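        # Note that the rendered CSS is ordered by media type string
        # ('print' < 'screen' < 'screen, print'), not by definition order.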
self.assertEqual(str(multimedia.media), """<link href="/file4" type="text/css" media="print" rel="stylesheet" />
<link href="/file3" type="text/css" media="screen" rel="stylesheet" />
<link href="/file1" type="text/css" media="screen, print" rel="stylesheet" />
<link href="/file2" type="text/css" media="screen, print" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_multi_widget(self):
###############################################################
# Multiwidget media handling
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
# MultiWidgets have a default media definition that gets all the
# media from the component widgets
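        # The merge follows the same dedup rules as test_combine_media:
        # js1, declared by all three component widgets, appears only once
        # in the expected output.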
class MyMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = [MyWidget1, MyWidget2, MyWidget3]
super(MyMultiWidget, self).__init__(widgets, attrs)
mymulti = MyMultiWidget()
self.assertEqual(str(mymulti.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
def test_form_media(self):
###############################################################
# Media processing for forms
###############################################################
class MyWidget1(TextInput):
class Media:
css = {
'all': ('path/to/css1', '/path/to/css2')
}
js = ('/path/to/js1', 'http://media.other.com/path/to/js2', 'https://secure.other.com/path/to/js3')
class MyWidget2(TextInput):
class Media:
css = {
'all': ('/path/to/css2', '/path/to/css3')
}
js = ('/path/to/js1', '/path/to/js4')
class MyWidget3(TextInput):
class Media:
css = {
'all': ('/path/to/css3', 'path/to/css1')
}
js = ('/path/to/js1', '/path/to/js4')
# You can ask a form for the media required by its widgets.
class MyForm(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
f1 = MyForm()
self.assertEqual(str(f1.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Form media can be combined to produce a single media definition.
class AnotherForm(Form):
field3 = CharField(max_length=20, widget=MyWidget3())
f2 = AnotherForm()
self.assertEqual(str(f1.media + f2.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>""")
# Forms can also define media, following the same rules as widgets.
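        # The form's own assets are appended after the media gathered from
        # its widgets: /some/form/css is the last stylesheet and
        # /some/form/javascript the last script in the expected output.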
class FormWithMedia(Form):
field1 = CharField(max_length=20, widget=MyWidget1())
field2 = CharField(max_length=20, widget=MyWidget2())
class Media:
js = ('/some/form/javascript',)
css = {
'all': ('/some/form/css',)
}
f3 = FormWithMedia()
self.assertEqual(str(f3.media), """<link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />
<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script>""")
# Media works in templates
self.assertEqual(Template("{{ form.media.js }}{{ form.media.css }}").render(Context({'form': f3})), """<script type="text/javascript" src="/path/to/js1"></script>
<script type="text/javascript" src="http://media.other.com/path/to/js2"></script>
<script type="text/javascript" src="https://secure.other.com/path/to/js3"></script>
<script type="text/javascript" src="/path/to/js4"></script>
<script type="text/javascript" src="/some/form/javascript"></script><link href="http://media.example.com/static/path/to/css1" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css2" type="text/css" media="all" rel="stylesheet" />
<link href="/path/to/css3" type="text/css" media="all" rel="stylesheet" />
<link href="/some/form/css" type="text/css" media="all" rel="stylesheet" />""")
| bsd-3-clause |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/encodings/cp437.py | 272 | 34564 | """ Python Character Mapping Codec cp437 generated from 'VENDORS/MICSFT/PC/CP437.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp437',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
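# Usage sketch (editorial example, not part of the generated module; the
# expected values can be read off the tables below):
#
#   >>> '\u00f1'.encode('cp437')       # LATIN SMALL LETTER N WITH TILDE
#   b'\xa4'
#   >>> b'\xdb\xb1'.decode('cp437')    # FULL BLOCK, MEDIUM SHADE
#   '\u2588\u2592'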
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00a2, # CENT SIGN
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00a5, # YEN SIGN
0x009e: 0x20a7, # PESETA SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x2310, # REVERSED NOT SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x2561, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x00b6: 0x2562, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x00b7: 0x2556, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x00b8: 0x2555, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x255c, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x00be: 0x255b, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x255e, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x00c7: 0x255f, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x2567, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x00d0: 0x2568, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x00d1: 0x2564, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x00d2: 0x2565, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x00d3: 0x2559, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x00d4: 0x2558, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x00d5: 0x2552, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x00d6: 0x2553, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x00d7: 0x256b, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x00d8: 0x256a, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x03b1, # GREEK SMALL LETTER ALPHA
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x0393, # GREEK CAPITAL LETTER GAMMA
0x00e3: 0x03c0, # GREEK SMALL LETTER PI
0x00e4: 0x03a3, # GREEK CAPITAL LETTER SIGMA
0x00e5: 0x03c3, # GREEK SMALL LETTER SIGMA
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x03c4, # GREEK SMALL LETTER TAU
0x00e8: 0x03a6, # GREEK CAPITAL LETTER PHI
0x00e9: 0x0398, # GREEK CAPITAL LETTER THETA
0x00ea: 0x03a9, # GREEK CAPITAL LETTER OMEGA
0x00eb: 0x03b4, # GREEK SMALL LETTER DELTA
0x00ec: 0x221e, # INFINITY
0x00ed: 0x03c6, # GREEK SMALL LETTER PHI
0x00ee: 0x03b5, # GREEK SMALL LETTER EPSILON
0x00ef: 0x2229, # INTERSECTION
0x00f0: 0x2261, # IDENTICAL TO
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2265, # GREATER-THAN OR EQUAL TO
0x00f3: 0x2264, # LESS-THAN OR EQUAL TO
0x00f4: 0x2320, # TOP HALF INTEGRAL
0x00f5: 0x2321, # BOTTOM HALF INTEGRAL
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x2248, # ALMOST EQUAL TO
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x221a, # SQUARE ROOT
0x00fc: 0x207f, # SUPERSCRIPT LATIN SMALL LETTER N
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
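# Note: the codec entry points above decode via decoding_table and encode
# via encoding_map; decoding_map itself is not referenced by them and is
# retained as part of the standard gencodec.py output.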
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xa2' # 0x009b -> CENT SIGN
'\xa3' # 0x009c -> POUND SIGN
'\xa5' # 0x009d -> YEN SIGN
'\u20a7' # 0x009e -> PESETA SIGN
'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
'\u2310' # 0x00a9 -> REVERSED NOT SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u2561' # 0x00b5 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
'\u2562' # 0x00b6 -> BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
'\u2556' # 0x00b7 -> BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
'\u2555' # 0x00b8 -> BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u255c' # 0x00bd -> BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
'\u255b' # 0x00be -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u255e' # 0x00c6 -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
'\u255f' # 0x00c7 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u2567' # 0x00cf -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
'\u2568' # 0x00d0 -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
'\u2564' # 0x00d1 -> BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
'\u2565' # 0x00d2 -> BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
'\u2559' # 0x00d3 -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
'\u2558' # 0x00d4 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
'\u2552' # 0x00d5 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
'\u2553' # 0x00d6 -> BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
'\u256b' # 0x00d7 -> BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
'\u256a' # 0x00d8 -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u03b1' # 0x00e0 -> GREEK SMALL LETTER ALPHA
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
'\u0393' # 0x00e2 -> GREEK CAPITAL LETTER GAMMA
'\u03c0' # 0x00e3 -> GREEK SMALL LETTER PI
'\u03a3' # 0x00e4 -> GREEK CAPITAL LETTER SIGMA
'\u03c3' # 0x00e5 -> GREEK SMALL LETTER SIGMA
'\xb5' # 0x00e6 -> MICRO SIGN
'\u03c4' # 0x00e7 -> GREEK SMALL LETTER TAU
'\u03a6' # 0x00e8 -> GREEK CAPITAL LETTER PHI
'\u0398' # 0x00e9 -> GREEK CAPITAL LETTER THETA
'\u03a9' # 0x00ea -> GREEK CAPITAL LETTER OMEGA
'\u03b4' # 0x00eb -> GREEK SMALL LETTER DELTA
'\u221e' # 0x00ec -> INFINITY
'\u03c6' # 0x00ed -> GREEK SMALL LETTER PHI
'\u03b5' # 0x00ee -> GREEK SMALL LETTER EPSILON
'\u2229' # 0x00ef -> INTERSECTION
'\u2261' # 0x00f0 -> IDENTICAL TO
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u2265' # 0x00f2 -> GREATER-THAN OR EQUAL TO
'\u2264' # 0x00f3 -> LESS-THAN OR EQUAL TO
'\u2320' # 0x00f4 -> TOP HALF INTEGRAL
'\u2321' # 0x00f5 -> BOTTOM HALF INTEGRAL
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u2248' # 0x00f7 -> ALMOST EQUAL TO
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\u221a' # 0x00fb -> SQUARE ROOT
'\u207f' # 0x00fc -> SUPERSCRIPT LATIN SMALL LETTER N
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x009b, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a5: 0x009d, # YEN SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b5: 0x00e6, # MICRO SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x0393: 0x00e2, # GREEK CAPITAL LETTER GAMMA
0x0398: 0x00e9, # GREEK CAPITAL LETTER THETA
0x03a3: 0x00e4, # GREEK CAPITAL LETTER SIGMA
0x03a6: 0x00e8, # GREEK CAPITAL LETTER PHI
0x03a9: 0x00ea, # GREEK CAPITAL LETTER OMEGA
0x03b1: 0x00e0, # GREEK SMALL LETTER ALPHA
0x03b4: 0x00eb, # GREEK SMALL LETTER DELTA
0x03b5: 0x00ee, # GREEK SMALL LETTER EPSILON
0x03c0: 0x00e3, # GREEK SMALL LETTER PI
0x03c3: 0x00e5, # GREEK SMALL LETTER SIGMA
0x03c4: 0x00e7, # GREEK SMALL LETTER TAU
0x03c6: 0x00ed, # GREEK SMALL LETTER PHI
0x207f: 0x00fc, # SUPERSCRIPT LATIN SMALL LETTER N
0x20a7: 0x009e, # PESETA SIGN
0x2219: 0x00f9, # BULLET OPERATOR
0x221a: 0x00fb, # SQUARE ROOT
0x221e: 0x00ec, # INFINITY
0x2229: 0x00ef, # INTERSECTION
0x2248: 0x00f7, # ALMOST EQUAL TO
0x2261: 0x00f0, # IDENTICAL TO
0x2264: 0x00f3, # LESS-THAN OR EQUAL TO
0x2265: 0x00f2, # GREATER-THAN OR EQUAL TO
0x2310: 0x00a9, # REVERSED NOT SIGN
0x2320: 0x00f4, # TOP HALF INTEGRAL
0x2321: 0x00f5, # BOTTOM HALF INTEGRAL
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2552: 0x00d5, # BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
0x2553: 0x00d6, # BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2555: 0x00b8, # BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
0x2556: 0x00b7, # BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x2558: 0x00d4, # BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
0x2559: 0x00d3, # BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255b: 0x00be, # BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
0x255c: 0x00bd, # BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x255e: 0x00c6, # BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
0x255f: 0x00c7, # BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2561: 0x00b5, # BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
0x2562: 0x00b6, # BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2564: 0x00d1, # BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
0x2565: 0x00d2, # BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2567: 0x00cf, # BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
0x2568: 0x00d0, # BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256a: 0x00d8, # BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
0x256b: 0x00d7, # BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| gpl-2.0 |
ARudiuk/mne-python | examples/inverse/plot_label_from_stc.py | 31 | 3963 | """
=================================================
Generate a functional label from source estimates
=================================================
Threshold source estimates and produce a functional label. The label
is typically the region of interest that contains high values.
Here we compare the average time course in the anatomical label obtained
by FreeSurfer segmentation and the average time course from the
functional label. As expected, the time course in the functional
label yields higher values.
"""
# Author: Luke Bloy <[email protected]>
# Alex Gramfort <[email protected]>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subject = 'sample'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
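# lambda2 is the usual regularization weight of the inverse solution,
# 1 / SNR ** 2; with the assumed SNR of 3 it evaluates to 1 / 9.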
# Compute a label/ROI based on the peak power between 80 and 120 ms.
# The label bankssts-lh is used for the comparison.
aparc_label_name = 'bankssts-lh'
tmin, tmax = 0.080, 0.120
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src'] # get the source space
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori='normal')
# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()
# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.
func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
subjects_dir=subjects_dir, connected=True)
# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]
# load the anatomical ROI for comparison
anat_label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]
stc_func_label = stc.in_label(func_label)
pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]
# flip the sign of each PCA time course so that both are positive at the
# sample where the anatomical course peaks
pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))])
pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_anat))])
###############################################################################
# plot the time courses....
plt.figure()
plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k',
label='Anatomical %s' % aparc_label_name)
plt.plot(1e3 * stc_func_label.times, pca_func, 'b',
label='Functional %s' % aparc_label_name)
plt.legend()
plt.show()
###############################################################################
# plot brain in 3D with PySurfer if available
brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# show both labels
brain.add_label(anat_label, borders=True, color='k')
brain.add_label(func_label, borders=True, color='b')
| bsd-3-clause |
mozman/ezdxf | tests/test_06_math/test_630b_bezier4p_functions.py | 1 | 4662 | # Copyright (c) 2010-2020 Manfred Moitzi
# License: MIT License
import pytest
import random
from ezdxf.math import (
cubic_bezier_interpolation, Vec3, Bezier3P, quadratic_to_cubic_bezier,
Bezier4P, have_bezier_curves_g1_continuity, bezier_to_bspline,
)
def test_vertex_interpolation():
points = [(0, 0), (3, 1), (5, 3), (0, 8)]
result = list(cubic_bezier_interpolation(points))
assert len(result) == 3
c1, c2, c3 = result
p = c1.control_points
assert p[0].isclose((0, 0))
assert p[1].isclose((0.9333333333333331, 0.3111111111111111))
assert p[2].isclose((1.8666666666666663, 0.6222222222222222))
assert p[3].isclose((3, 1))
p = c2.control_points
assert p[0].isclose((3, 1))
assert p[1].isclose((4.133333333333334, 1.3777777777777778))
assert p[2].isclose((5.466666666666667, 1.822222222222222))
assert p[3].isclose((5, 3))
p = c3.control_points
assert p[0].isclose((5, 3))
assert p[1].isclose((4.533333333333333, 4.177777777777778))
assert p[2].isclose((2.2666666666666666, 6.088888888888889))
assert p[3].isclose((0, 8))
def test_quadratic_to_cubic_bezier():
r = random.Random(0)
def random_vec() -> Vec3:
return Vec3(r.uniform(-10, 10), r.uniform(-10, 10), r.uniform(-10, 10))
for i in range(1000):
quadratic = Bezier3P((random_vec(), random_vec(), random_vec()))
quadratic_approx = list(quadratic.approximate(10))
cubic = quadratic_to_cubic_bezier(quadratic)
cubic_approx = list(cubic.approximate(10))
assert len(quadratic_approx) == len(cubic_approx)
for p1, p2 in zip(quadratic_approx, cubic_approx):
assert p1.isclose(p2)
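# Background for the test above (sketch): a quadratic Bezier curve with
# control points (q0, q1, q2) is reproduced exactly by the cubic with
#   c0 = q0, c1 = q0 + 2/3 * (q1 - q0), c2 = q2 + 2/3 * (q1 - q2), c3 = q2,
# which is why the two 10-point approximations match point for point.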
# G1 continuity: normalized end-tangent == normalized start-tangent of next curve
B1 = Bezier4P([(0, 0), (1, 1), (2, 1), (3, 0)])
# B1/B2 has G1 continuity:
B2 = Bezier4P([(3, 0), (4, -1), (5, -1), (6, 0)])
# B1/B3 has no G1 continuity:
B3 = Bezier4P([(3, 0), (4, 1), (5, 1), (6, 0)])
# B1/B4 G1 continuity off tolerance:
B4 = Bezier4P([(3, 0), (4, -1.03), (5, -1.0), (6, 0)])
# B1/B5 has a gap between B1 end and B5 start:
B5 = Bezier4P([(4, 0), (5, -1), (6, -1), (7, 0)])
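# Tangent sketch: for a cubic Bezier (p0, p1, p2, p3) the start tangent
# direction is p1 - p0 and the end tangent direction is p3 - p2. B1 ends
# with (3, 0) - (2, 1) = (1, -1) and B2 starts with (4, -1) - (3, 0) =
# (1, -1), hence B1/B2 are G1 continuous; B3 starts with (4, 1) - (3, 0) =
# (1, 1), so B1/B3 are not.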
def test_g1_continuity_for_bezier_curves():
assert have_bezier_curves_g1_continuity(B1, B2) is True
assert have_bezier_curves_g1_continuity(B1, B3) is False
    assert have_bezier_curves_g1_continuity(B1, B4, g1_tol=1e-4) is False, \
        "should be outside of tolerance"
    assert have_bezier_curves_g1_continuity(B1, B5) is False, \
        "end and start points should match"
D1 = Bezier4P([(0, 0), (1, 1), (3, 0), (3, 0)])
D2 = Bezier4P([(3, 0), (3, 0), (5, -1), (6, 0)])
def test_g1_continuity_for_degenerated_bezier_curves():
assert have_bezier_curves_g1_continuity(D1, B2) is False
assert have_bezier_curves_g1_continuity(B1, D2) is False
assert have_bezier_curves_g1_continuity(D1, D2) is False
@pytest.mark.parametrize('curve', [D1, D2])
def test_flatten_degenerated_bezier_curves(curve):
# Degenerated Bezier curves behave like regular curves!
assert len(list(curve.flattening(0.1))) > 4
@pytest.mark.parametrize("b1,b2", [
(B1, B2), # G1 continuity, the common case
(B1, B3), # without G1 continuity is also a regular B-spline
(B1, B5), # regular B-spline, but first control point of B5 is lost
], ids=["G1", "without G1", "gap"])
def test_bezier_curves_to_bspline(b1, b2):
bspline = bezier_to_bspline([b1, b2])
# Remove duplicate control point between two adjacent curves:
expected = list(b1.control_points) + list(b2.control_points)[1:]
assert bspline.degree == 3, "should be a cubic B-spline"
assert bspline.control_points == tuple(expected)
def test_quality_of_bezier_to_bspline_conversion_1():
    # This test shows the close relationship between cubic Bézier- and
# cubic B-spline curves.
points0 = B1.approximate(10)
points1 = bezier_to_bspline([B1]).approximate(10)
for p0, p1 in zip(points0, points1):
assert p0.isclose(p1) is True, "conversion should be perfect"
def test_quality_of_bezier_to_bspline_conversion_2():
    # This test shows the close relationship between cubic Bézier- and
# cubic B-spline curves.
# Remove duplicate point between the two curves:
points0 = list(B1.approximate(10)) + list(B2.approximate(10))[1:]
points1 = bezier_to_bspline([B1, B2]).approximate(20)
for p0, p1 in zip(points0, points1):
assert p0.isclose(p1) is True, "conversion should be perfect"
def test_bezier_curves_to_bspline_error():
with pytest.raises(ValueError):
bezier_to_bspline([]) # one or more curves expected
| mit |